1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 #include "spdk/bdev_module.h" 12 13 #include "common/lib/ut_multithread.c" 14 15 #include "bdev/nvme/bdev_nvme.c" 16 17 #include "unit/lib/json_mock.c" 18 19 #include "bdev/nvme/bdev_mdns_client.c" 20 21 static void *g_accel_p = (void *)0xdeadbeaf; 22 23 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 24 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 25 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 26 spdk_nvme_remove_cb remove_cb), NULL); 27 28 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 29 enum spdk_nvme_transport_type trtype)); 30 31 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 32 NULL); 33 34 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 35 36 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 37 struct spdk_nvme_transport_id *trid), 0); 38 39 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 40 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 41 42 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 43 44 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 45 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 51 52 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 53 int error_code, const char *msg)); 54 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 55 (struct spdk_jsonrpc_request *request), NULL); 56 DEFINE_STUB_V(spdk_jsonrpc_end_result, 57 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 58 59 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 60 size_t opts_size)); 61 62 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 63 size_t opts_size), 0); 64 65 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 66 67 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 68 enum spdk_bdev_reset_stat_mode mode)); 69 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 70 struct spdk_bdev_io_stat *add)); 71 72 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr)); 73 74 int 75 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 76 struct spdk_memory_domain **domains, int array_size) 77 { 78 int i, min_array_size; 79 80 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 81 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 82 for (i = 0; i < min_array_size; i++) { 83 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 84 } 85 } 86 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 87 88 return 0; 89 } 90 91 struct spdk_io_channel * 92 spdk_accel_get_io_channel(void) 93 { 94 return spdk_get_io_channel(g_accel_p); 95 } 96 97 void 98 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 99 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 100 { 101 /* Avoid warning that opts is used uninitialised */ 102 memset(opts, 0, opts_size); 103 } 104 105 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 106 (struct spdk_nvme_ctrlr *ctrlr), NULL); 107 108 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 109 (const struct spdk_nvme_ctrlr *ctrlr), 0); 110 111 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 112 (struct spdk_nvme_ctrlr *ctrlr), NULL); 113 114 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 115 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 116 117 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 118 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 119 120 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 121 122 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 123 124 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 125 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 126 127 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 128 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 129 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 130 131 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 132 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 133 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 134 135 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 136 size_t *size), 0); 137 138 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 139 140 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 141 142 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 143 144 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 145 146 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 147 148 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 149 150 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 151 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 152 153 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 154 155 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 156 char *name, size_t *size), 0); 157 158 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 159 (struct spdk_nvme_ns *ns), 0); 160 161 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 162 (const struct spdk_nvme_ctrlr *ctrlr), 0); 163 164 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 165 (struct spdk_nvme_ns *ns), 0); 166 167 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 168 (struct spdk_nvme_ns *ns), 0); 169 170 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 171 (struct spdk_nvme_ns *ns), 0); 172 173 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 174 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 175 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 176 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 177 178 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 179 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 180 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 181 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 182 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 183 184 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 185 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 186 void *payload, uint32_t payload_size, uint64_t slba, 187 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 188 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 189 190 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 191 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 192 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 193 194 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 195 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 196 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 197 198 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 199 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 200 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 201 202 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 203 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 204 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 205 206 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 207 208 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 209 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 210 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 211 212 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 213 (const struct spdk_nvme_status *status), NULL); 214 215 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 216 (const struct spdk_nvme_status *status), NULL); 217 218 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 219 220 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 221 222 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 223 224 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 225 226 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 227 228 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 229 struct iovec *iov, 230 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 231 DEFINE_STUB(spdk_accel_append_crc32c, int, 232 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst, 233 struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx, 234 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 235 DEFINE_STUB_V(spdk_accel_sequence_finish, 236 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 237 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 238 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 239 240 struct ut_nvme_req { 241 uint16_t opc; 242 spdk_nvme_cmd_cb cb_fn; 243 void *cb_arg; 244 struct spdk_nvme_cpl cpl; 245 TAILQ_ENTRY(ut_nvme_req) tailq; 246 }; 247 248 struct spdk_nvme_ns { 249 struct spdk_nvme_ctrlr *ctrlr; 250 uint32_t id; 251 bool is_active; 252 struct spdk_uuid *uuid; 253 enum spdk_nvme_ana_state ana_state; 254 enum spdk_nvme_csi csi; 255 }; 256 257 struct spdk_nvme_qpair { 258 struct spdk_nvme_ctrlr *ctrlr; 259 uint8_t failure_reason; 260 bool is_connected; 261 bool in_completion_context; 262 bool delete_after_completion_context; 263 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 264 uint32_t num_outstanding_reqs; 265 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 266 struct spdk_nvme_poll_group *poll_group; 267 void *poll_group_tailq_head; 268 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 269 }; 270 271 struct spdk_nvme_ctrlr { 272 uint32_t num_ns; 273 struct spdk_nvme_ns *ns; 274 struct spdk_nvme_ns_data *nsdata; 275 struct spdk_nvme_qpair adminq; 276 struct spdk_nvme_ctrlr_data cdata; 277 bool attached; 278 bool is_failed; 279 bool fail_reset; 280 bool is_removed; 281 struct spdk_nvme_transport_id trid; 282 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 283 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 284 struct spdk_nvme_ctrlr_opts opts; 285 }; 286 287 struct spdk_nvme_poll_group { 288 void *ctx; 289 struct spdk_nvme_accel_fn_table accel_fn_table; 290 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 291 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 292 }; 293 294 struct spdk_nvme_probe_ctx { 295 struct spdk_nvme_transport_id trid; 296 void *cb_ctx; 297 spdk_nvme_attach_cb attach_cb; 298 struct spdk_nvme_ctrlr *init_ctrlr; 299 }; 300 301 uint32_t 302 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 303 { 304 uint32_t nsid; 305 306 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 307 if (ctrlr->ns[nsid - 1].is_active) { 308 return nsid; 309 } 310 } 311 312 return 0; 313 } 314 315 uint32_t 316 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 317 { 318 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 319 if (ctrlr->ns[nsid - 1].is_active) { 320 return nsid; 321 } 322 } 323 324 return 0; 325 } 326 327 uint32_t 328 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 329 { 330 return qpair->num_outstanding_reqs; 331 } 332 333 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 334 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 335 g_ut_attached_ctrlrs); 336 static int g_ut_attach_ctrlr_status; 337 static size_t g_ut_attach_bdev_count; 338 static int g_ut_register_bdev_status; 339 static struct spdk_bdev *g_ut_registered_bdev; 340 static uint16_t g_ut_cntlid; 341 static struct nvme_path_id g_any_path = {}; 342 343 static void 344 ut_init_trid(struct spdk_nvme_transport_id *trid) 345 { 346 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 347 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 348 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 349 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 350 } 351 352 static void 353 ut_init_trid2(struct spdk_nvme_transport_id *trid) 354 { 355 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 356 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 357 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 358 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 359 } 360 361 static void 362 ut_init_trid3(struct spdk_nvme_transport_id *trid) 363 { 364 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 365 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 366 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 367 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 368 } 369 370 static int 371 cmp_int(int a, int b) 372 { 373 return a - b; 374 } 375 376 int 377 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 378 const struct spdk_nvme_transport_id *trid2) 379 { 380 int cmp; 381 382 /* We assume trtype is TCP for now. */ 383 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 384 385 cmp = cmp_int(trid1->trtype, trid2->trtype); 386 if (cmp) { 387 return cmp; 388 } 389 390 cmp = strcasecmp(trid1->traddr, trid2->traddr); 391 if (cmp) { 392 return cmp; 393 } 394 395 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 396 if (cmp) { 397 return cmp; 398 } 399 400 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 401 if (cmp) { 402 return cmp; 403 } 404 405 cmp = strcmp(trid1->subnqn, trid2->subnqn); 406 if (cmp) { 407 return cmp; 408 } 409 410 return 0; 411 } 412 413 static struct spdk_nvme_ctrlr * 414 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 415 bool ana_reporting, bool multipath) 416 { 417 struct spdk_nvme_ctrlr *ctrlr; 418 uint32_t i; 419 420 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 421 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 422 /* There is a ctrlr whose trid matches. */ 423 return NULL; 424 } 425 } 426 427 ctrlr = calloc(1, sizeof(*ctrlr)); 428 if (ctrlr == NULL) { 429 return NULL; 430 } 431 432 ctrlr->attached = true; 433 ctrlr->adminq.ctrlr = ctrlr; 434 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 435 ctrlr->adminq.is_connected = true; 436 437 if (num_ns != 0) { 438 ctrlr->num_ns = num_ns; 439 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 440 if (ctrlr->ns == NULL) { 441 free(ctrlr); 442 return NULL; 443 } 444 445 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 446 if (ctrlr->nsdata == NULL) { 447 free(ctrlr->ns); 448 free(ctrlr); 449 return NULL; 450 } 451 452 for (i = 0; i < num_ns; i++) { 453 ctrlr->ns[i].id = i + 1; 454 ctrlr->ns[i].ctrlr = ctrlr; 455 ctrlr->ns[i].is_active = true; 456 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 457 ctrlr->nsdata[i].nsze = 1024; 458 ctrlr->nsdata[i].nmic.can_share = multipath; 459 } 460 461 ctrlr->cdata.nn = num_ns; 462 ctrlr->cdata.mnan = num_ns; 463 ctrlr->cdata.nanagrpid = num_ns; 464 } 465 466 ctrlr->cdata.cntlid = ++g_ut_cntlid; 467 ctrlr->cdata.cmic.multi_ctrlr = multipath; 468 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 469 ctrlr->trid = *trid; 470 TAILQ_INIT(&ctrlr->active_io_qpairs); 471 472 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 473 474 return ctrlr; 475 } 476 477 static void 478 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 479 { 480 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 481 482 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 483 free(ctrlr->nsdata); 484 free(ctrlr->ns); 485 free(ctrlr); 486 } 487 488 static int 489 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 490 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 491 { 492 struct ut_nvme_req *req; 493 494 req = calloc(1, sizeof(*req)); 495 if (req == NULL) { 496 return -ENOMEM; 497 } 498 499 req->opc = opc; 500 req->cb_fn = cb_fn; 501 req->cb_arg = cb_arg; 502 503 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 504 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 505 506 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 507 qpair->num_outstanding_reqs++; 508 509 return 0; 510 } 511 512 static struct ut_nvme_req * 513 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 514 { 515 struct ut_nvme_req *req; 516 517 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 518 if (req->cb_arg == cb_arg) { 519 break; 520 } 521 } 522 523 return req; 524 } 525 526 static struct spdk_bdev_io * 527 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 528 struct spdk_io_channel *ch) 529 { 530 struct spdk_bdev_io *bdev_io; 531 532 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 533 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 534 bdev_io->type = type; 535 bdev_io->bdev = &nbdev->disk; 536 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 537 538 return bdev_io; 539 } 540 541 static void 542 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 543 { 544 bdev_io->u.bdev.iovs = &bdev_io->iov; 545 bdev_io->u.bdev.iovcnt = 1; 546 547 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 548 bdev_io->iov.iov_len = 4096; 549 } 550 551 static void 552 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 553 { 554 if (ctrlr->is_failed) { 555 free(ctrlr); 556 return; 557 } 558 559 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 560 if (probe_ctx->cb_ctx) { 561 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 562 } 563 564 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 565 566 if (probe_ctx->attach_cb) { 567 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 568 } 569 } 570 571 int 572 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 573 { 574 struct spdk_nvme_ctrlr *ctrlr, *tmp; 575 576 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 577 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 578 continue; 579 } 580 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 581 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 582 } 583 584 free(probe_ctx); 585 586 return 0; 587 } 588 589 struct spdk_nvme_probe_ctx * 590 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 591 const struct spdk_nvme_ctrlr_opts *opts, 592 spdk_nvme_attach_cb attach_cb) 593 { 594 struct spdk_nvme_probe_ctx *probe_ctx; 595 596 if (trid == NULL) { 597 return NULL; 598 } 599 600 probe_ctx = calloc(1, sizeof(*probe_ctx)); 601 if (probe_ctx == NULL) { 602 return NULL; 603 } 604 605 probe_ctx->trid = *trid; 606 probe_ctx->cb_ctx = (void *)opts; 607 probe_ctx->attach_cb = attach_cb; 608 609 return probe_ctx; 610 } 611 612 int 613 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 614 { 615 if (ctrlr->attached) { 616 ut_detach_ctrlr(ctrlr); 617 } 618 619 return 0; 620 } 621 622 int 623 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 624 { 625 SPDK_CU_ASSERT_FATAL(ctx != NULL); 626 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 627 628 return 0; 629 } 630 631 int 632 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 633 { 634 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 635 } 636 637 void 638 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 639 { 640 memset(opts, 0, opts_size); 641 642 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 643 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 644 } 645 646 const struct spdk_nvme_ctrlr_data * 647 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 648 { 649 return &ctrlr->cdata; 650 } 651 652 uint32_t 653 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 654 { 655 return ctrlr->num_ns; 656 } 657 658 struct spdk_nvme_ns * 659 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 660 { 661 if (nsid < 1 || nsid > ctrlr->num_ns) { 662 return NULL; 663 } 664 665 return &ctrlr->ns[nsid - 1]; 666 } 667 668 bool 669 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 670 { 671 if (nsid < 1 || nsid > ctrlr->num_ns) { 672 return false; 673 } 674 675 return ctrlr->ns[nsid - 1].is_active; 676 } 677 678 union spdk_nvme_csts_register 679 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 680 { 681 union spdk_nvme_csts_register csts; 682 683 csts.raw = 0; 684 685 return csts; 686 } 687 688 union spdk_nvme_vs_register 689 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 690 { 691 union spdk_nvme_vs_register vs; 692 693 vs.raw = 0; 694 695 return vs; 696 } 697 698 struct spdk_nvme_qpair * 699 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 700 const struct spdk_nvme_io_qpair_opts *user_opts, 701 size_t opts_size) 702 { 703 struct spdk_nvme_qpair *qpair; 704 705 qpair = calloc(1, sizeof(*qpair)); 706 if (qpair == NULL) { 707 return NULL; 708 } 709 710 qpair->ctrlr = ctrlr; 711 TAILQ_INIT(&qpair->outstanding_reqs); 712 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 713 714 return qpair; 715 } 716 717 static void 718 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 719 { 720 struct spdk_nvme_poll_group *group = qpair->poll_group; 721 722 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 723 724 qpair->poll_group_tailq_head = &group->connected_qpairs; 725 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 726 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 727 } 728 729 static void 730 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 731 { 732 struct spdk_nvme_poll_group *group = qpair->poll_group; 733 734 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 735 736 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 737 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 738 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 739 } 740 741 int 742 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 743 struct spdk_nvme_qpair *qpair) 744 { 745 if (qpair->is_connected) { 746 return -EISCONN; 747 } 748 749 qpair->is_connected = true; 750 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 751 752 if (qpair->poll_group) { 753 nvme_poll_group_connect_qpair(qpair); 754 } 755 756 return 0; 757 } 758 759 void 760 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 761 { 762 if (!qpair->is_connected) { 763 return; 764 } 765 766 qpair->is_connected = false; 767 768 if (qpair->poll_group != NULL) { 769 nvme_poll_group_disconnect_qpair(qpair); 770 } 771 } 772 773 int 774 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 775 { 776 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 777 778 if (qpair->in_completion_context) { 779 qpair->delete_after_completion_context = true; 780 return 0; 781 } 782 783 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 784 785 if (qpair->poll_group != NULL) { 786 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 787 } 788 789 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 790 791 CU_ASSERT(qpair->num_outstanding_reqs == 0); 792 793 free(qpair); 794 795 return 0; 796 } 797 798 int 799 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 800 { 801 if (ctrlr->fail_reset) { 802 ctrlr->is_failed = true; 803 return -EIO; 804 } 805 806 ctrlr->adminq.is_connected = true; 807 return 0; 808 } 809 810 void 811 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 812 { 813 } 814 815 int 816 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 817 { 818 if (ctrlr->is_removed) { 819 return -ENXIO; 820 } 821 822 ctrlr->adminq.is_connected = false; 823 ctrlr->is_failed = false; 824 825 return 0; 826 } 827 828 void 829 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 830 { 831 ctrlr->is_failed = true; 832 } 833 834 bool 835 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 836 { 837 return ctrlr->is_failed; 838 } 839 840 spdk_nvme_qp_failure_reason 841 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 842 { 843 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 844 } 845 846 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 847 sizeof(uint32_t)) 848 static void 849 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 850 { 851 struct spdk_nvme_ana_page ana_hdr; 852 char _ana_desc[UT_ANA_DESC_SIZE]; 853 struct spdk_nvme_ana_group_descriptor *ana_desc; 854 struct spdk_nvme_ns *ns; 855 uint32_t i; 856 857 memset(&ana_hdr, 0, sizeof(ana_hdr)); 858 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 859 860 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 861 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 862 863 buf += sizeof(ana_hdr); 864 length -= sizeof(ana_hdr); 865 866 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 867 868 for (i = 0; i < ctrlr->num_ns; i++) { 869 ns = &ctrlr->ns[i]; 870 871 if (!ns->is_active) { 872 continue; 873 } 874 875 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 876 877 ana_desc->ana_group_id = ns->id; 878 ana_desc->num_of_nsid = 1; 879 ana_desc->ana_state = ns->ana_state; 880 ana_desc->nsid[0] = ns->id; 881 882 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 883 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 884 885 buf += UT_ANA_DESC_SIZE; 886 length -= UT_ANA_DESC_SIZE; 887 } 888 } 889 890 int 891 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 892 uint8_t log_page, uint32_t nsid, 893 void *payload, uint32_t payload_size, 894 uint64_t offset, 895 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 896 { 897 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 898 SPDK_CU_ASSERT_FATAL(offset == 0); 899 ut_create_ana_log_page(ctrlr, payload, payload_size); 900 } 901 902 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 903 cb_fn, cb_arg); 904 } 905 906 int 907 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 908 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 909 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 910 { 911 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 912 } 913 914 int 915 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 916 void *cmd_cb_arg, 917 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 918 { 919 struct ut_nvme_req *req = NULL, *abort_req; 920 921 if (qpair == NULL) { 922 qpair = &ctrlr->adminq; 923 } 924 925 abort_req = calloc(1, sizeof(*abort_req)); 926 if (abort_req == NULL) { 927 return -ENOMEM; 928 } 929 930 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 931 if (req->cb_arg == cmd_cb_arg) { 932 break; 933 } 934 } 935 936 if (req == NULL) { 937 free(abort_req); 938 return -ENOENT; 939 } 940 941 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 942 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 943 944 abort_req->opc = SPDK_NVME_OPC_ABORT; 945 abort_req->cb_fn = cb_fn; 946 abort_req->cb_arg = cb_arg; 947 948 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 949 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 950 abort_req->cpl.cdw0 = 0; 951 952 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 953 ctrlr->adminq.num_outstanding_reqs++; 954 955 return 0; 956 } 957 958 int32_t 959 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 960 { 961 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 962 } 963 964 uint32_t 965 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 966 { 967 return ns->id; 968 } 969 970 struct spdk_nvme_ctrlr * 971 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 972 { 973 return ns->ctrlr; 974 } 975 976 static inline struct spdk_nvme_ns_data * 977 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 978 { 979 return &ns->ctrlr->nsdata[ns->id - 1]; 980 } 981 982 const struct spdk_nvme_ns_data * 983 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 984 { 985 return _nvme_ns_get_data(ns); 986 } 987 988 uint64_t 989 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 990 { 991 return _nvme_ns_get_data(ns)->nsze; 992 } 993 994 const struct spdk_uuid * 995 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 996 { 997 return ns->uuid; 998 } 999 1000 enum spdk_nvme_csi 1001 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 1002 return ns->csi; 1003 } 1004 1005 int 1006 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1007 void *metadata, uint64_t lba, uint32_t lba_count, 1008 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1009 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1010 { 1011 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1012 } 1013 1014 int 1015 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1016 void *buffer, void *metadata, uint64_t lba, 1017 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1018 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1019 { 1020 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1021 } 1022 1023 int 1024 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1025 uint64_t lba, uint32_t lba_count, 1026 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1027 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1028 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1029 uint16_t apptag_mask, uint16_t apptag) 1030 { 1031 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1032 } 1033 1034 int 1035 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1036 uint64_t lba, uint32_t lba_count, 1037 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1038 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1039 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1040 uint16_t apptag_mask, uint16_t apptag) 1041 { 1042 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1043 } 1044 1045 static bool g_ut_readv_ext_called; 1046 int 1047 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1048 uint64_t lba, uint32_t lba_count, 1049 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1050 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1051 spdk_nvme_req_next_sge_cb next_sge_fn, 1052 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1053 { 1054 g_ut_readv_ext_called = true; 1055 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1056 } 1057 1058 static bool g_ut_writev_ext_called; 1059 int 1060 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1061 uint64_t lba, uint32_t lba_count, 1062 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1063 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1064 spdk_nvme_req_next_sge_cb next_sge_fn, 1065 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1066 { 1067 g_ut_writev_ext_called = true; 1068 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1069 } 1070 1071 int 1072 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1073 uint64_t lba, uint32_t lba_count, 1074 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1075 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1076 spdk_nvme_req_next_sge_cb next_sge_fn, 1077 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1078 { 1079 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1080 } 1081 1082 int 1083 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1084 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1085 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1086 { 1087 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1088 } 1089 1090 int 1091 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1092 uint64_t lba, uint32_t lba_count, 1093 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1094 uint32_t io_flags) 1095 { 1096 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1097 } 1098 1099 int 1100 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1101 const struct spdk_nvme_scc_source_range *ranges, 1102 uint16_t num_ranges, uint64_t dest_lba, 1103 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1104 { 1105 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1106 } 1107 1108 struct spdk_nvme_poll_group * 1109 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1110 { 1111 struct spdk_nvme_poll_group *group; 1112 1113 group = calloc(1, sizeof(*group)); 1114 if (group == NULL) { 1115 return NULL; 1116 } 1117 1118 group->ctx = ctx; 1119 if (table != NULL) { 1120 group->accel_fn_table = *table; 1121 } 1122 TAILQ_INIT(&group->connected_qpairs); 1123 TAILQ_INIT(&group->disconnected_qpairs); 1124 1125 return group; 1126 } 1127 1128 int 1129 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1130 { 1131 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1132 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1133 return -EBUSY; 1134 } 1135 1136 free(group); 1137 1138 return 0; 1139 } 1140 1141 spdk_nvme_qp_failure_reason 1142 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1143 { 1144 return qpair->failure_reason; 1145 } 1146 1147 bool 1148 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair) 1149 { 1150 return qpair->is_connected; 1151 } 1152 1153 int32_t 1154 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1155 uint32_t max_completions) 1156 { 1157 struct ut_nvme_req *req, *tmp; 1158 uint32_t num_completions = 0; 1159 1160 if (!qpair->is_connected) { 1161 return -ENXIO; 1162 } 1163 1164 qpair->in_completion_context = true; 1165 1166 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1167 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1168 qpair->num_outstanding_reqs--; 1169 1170 req->cb_fn(req->cb_arg, &req->cpl); 1171 1172 free(req); 1173 num_completions++; 1174 } 1175 1176 qpair->in_completion_context = false; 1177 if (qpair->delete_after_completion_context) { 1178 spdk_nvme_ctrlr_free_io_qpair(qpair); 1179 } 1180 1181 return num_completions; 1182 } 1183 1184 int64_t 1185 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1186 uint32_t completions_per_qpair, 1187 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1188 { 1189 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1190 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1191 1192 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1193 1194 if (disconnected_qpair_cb == NULL) { 1195 return -EINVAL; 1196 } 1197 1198 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1199 disconnected_qpair_cb(qpair, group->ctx); 1200 } 1201 1202 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1203 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1204 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1205 /* Bump the number of completions so this counts as "busy" */ 1206 num_completions++; 1207 continue; 1208 } 1209 1210 local_completions = spdk_nvme_qpair_process_completions(qpair, 1211 completions_per_qpair); 1212 if (local_completions < 0 && error_reason == 0) { 1213 error_reason = local_completions; 1214 } else { 1215 num_completions += local_completions; 1216 assert(num_completions >= 0); 1217 } 1218 } 1219 1220 return error_reason ? error_reason : num_completions; 1221 } 1222 1223 int 1224 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1225 struct spdk_nvme_qpair *qpair) 1226 { 1227 CU_ASSERT(!qpair->is_connected); 1228 1229 qpair->poll_group = group; 1230 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1231 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1232 1233 return 0; 1234 } 1235 1236 int 1237 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1238 struct spdk_nvme_qpair *qpair) 1239 { 1240 CU_ASSERT(!qpair->is_connected); 1241 1242 if (qpair->poll_group == NULL) { 1243 return -ENOENT; 1244 } 1245 1246 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1247 1248 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1249 1250 qpair->poll_group = NULL; 1251 qpair->poll_group_tailq_head = NULL; 1252 1253 return 0; 1254 } 1255 1256 int 1257 spdk_bdev_register(struct spdk_bdev *bdev) 1258 { 1259 g_ut_registered_bdev = bdev; 1260 1261 return g_ut_register_bdev_status; 1262 } 1263 1264 void 1265 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1266 { 1267 int rc; 1268 1269 rc = bdev->fn_table->destruct(bdev->ctxt); 1270 1271 if (bdev == g_ut_registered_bdev) { 1272 g_ut_registered_bdev = NULL; 1273 } 1274 1275 if (rc <= 0 && cb_fn != NULL) { 1276 cb_fn(cb_arg, rc); 1277 } 1278 } 1279 1280 int 1281 spdk_bdev_open_ext(const char *bdev_name, bool write, 1282 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1283 struct spdk_bdev_desc **desc) 1284 { 1285 if (g_ut_registered_bdev == NULL || 1286 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1287 return -ENODEV; 1288 } 1289 1290 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1291 1292 return 0; 1293 } 1294 1295 struct spdk_bdev * 1296 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1297 { 1298 return (struct spdk_bdev *)desc; 1299 } 1300 1301 int 1302 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1303 { 1304 bdev->blockcnt = size; 1305 1306 return 0; 1307 } 1308 1309 struct spdk_io_channel * 1310 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1311 { 1312 return (struct spdk_io_channel *)bdev_io->internal.ch; 1313 } 1314 1315 struct spdk_thread * 1316 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io) 1317 { 1318 return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io)); 1319 } 1320 1321 void 1322 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1323 { 1324 bdev_io->internal.status = status; 1325 bdev_io->internal.in_submit_request = false; 1326 } 1327 1328 void 1329 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1330 { 1331 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1332 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1333 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1334 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1335 } else { 1336 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1337 } 1338 1339 bdev_io->internal.error.nvme.cdw0 = cdw0; 1340 bdev_io->internal.error.nvme.sct = sct; 1341 bdev_io->internal.error.nvme.sc = sc; 1342 1343 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1344 } 1345 1346 void 1347 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1348 { 1349 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1350 1351 ut_bdev_io_set_buf(bdev_io); 1352 1353 cb(ch, bdev_io, true); 1354 } 1355 1356 static void 1357 test_create_ctrlr(void) 1358 { 1359 struct spdk_nvme_transport_id trid = {}; 1360 struct spdk_nvme_ctrlr ctrlr = {}; 1361 int rc; 1362 1363 ut_init_trid(&trid); 1364 1365 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1366 CU_ASSERT(rc == 0); 1367 1368 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1369 1370 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1371 CU_ASSERT(rc == 0); 1372 1373 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1374 1375 poll_threads(); 1376 spdk_delay_us(1000); 1377 poll_threads(); 1378 1379 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1380 } 1381 1382 static void 1383 ut_check_hotplug_on_reset(void *cb_arg, int rc) 1384 { 1385 bool *detect_remove = cb_arg; 1386 1387 CU_ASSERT(rc != 0); 1388 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1389 1390 *detect_remove = true; 1391 } 1392 1393 static void 1394 test_reset_ctrlr(void) 1395 { 1396 struct spdk_nvme_transport_id trid = {}; 1397 struct spdk_nvme_ctrlr ctrlr = {}; 1398 struct nvme_ctrlr *nvme_ctrlr = NULL; 1399 struct nvme_path_id *curr_trid; 1400 struct spdk_io_channel *ch1, *ch2; 1401 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1402 bool detect_remove; 1403 int rc; 1404 1405 ut_init_trid(&trid); 1406 TAILQ_INIT(&ctrlr.active_io_qpairs); 1407 1408 set_thread(0); 1409 1410 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1411 CU_ASSERT(rc == 0); 1412 1413 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1414 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1415 1416 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1417 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1418 1419 ch1 = spdk_get_io_channel(nvme_ctrlr); 1420 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1421 1422 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1423 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1424 1425 set_thread(1); 1426 1427 ch2 = spdk_get_io_channel(nvme_ctrlr); 1428 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1429 1430 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1431 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1432 1433 /* Reset starts from thread 1. */ 1434 set_thread(1); 1435 1436 /* Case 1: ctrlr is already being destructed. */ 1437 nvme_ctrlr->destruct = true; 1438 1439 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1440 CU_ASSERT(rc == -ENXIO); 1441 1442 /* Case 2: reset is in progress. */ 1443 nvme_ctrlr->destruct = false; 1444 nvme_ctrlr->resetting = true; 1445 1446 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1447 CU_ASSERT(rc == -EBUSY); 1448 1449 /* Case 3: reset completes successfully. */ 1450 nvme_ctrlr->resetting = false; 1451 curr_trid->last_failed_tsc = spdk_get_ticks(); 1452 ctrlr.is_failed = true; 1453 1454 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1455 CU_ASSERT(rc == 0); 1456 CU_ASSERT(nvme_ctrlr->resetting == true); 1457 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1458 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1459 1460 poll_thread_times(0, 3); 1461 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1462 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1463 1464 poll_thread_times(0, 1); 1465 poll_thread_times(1, 1); 1466 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1467 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1468 CU_ASSERT(ctrlr.is_failed == true); 1469 1470 poll_thread_times(1, 1); 1471 poll_thread_times(0, 1); 1472 CU_ASSERT(ctrlr.is_failed == false); 1473 CU_ASSERT(ctrlr.adminq.is_connected == false); 1474 1475 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1476 poll_thread_times(0, 2); 1477 CU_ASSERT(ctrlr.adminq.is_connected == true); 1478 1479 poll_thread_times(0, 1); 1480 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1481 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1482 1483 poll_thread_times(1, 1); 1484 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1485 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1486 CU_ASSERT(nvme_ctrlr->resetting == true); 1487 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1488 1489 poll_thread_times(0, 2); 1490 CU_ASSERT(nvme_ctrlr->resetting == true); 1491 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1492 poll_thread_times(1, 1); 1493 CU_ASSERT(nvme_ctrlr->resetting == true); 1494 poll_thread_times(0, 1); 1495 CU_ASSERT(nvme_ctrlr->resetting == false); 1496 1497 /* Case 4: ctrlr is already removed. */ 1498 ctrlr.is_removed = true; 1499 1500 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1501 CU_ASSERT(rc == 0); 1502 1503 detect_remove = false; 1504 nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset; 1505 nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove; 1506 1507 poll_threads(); 1508 1509 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL); 1510 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL); 1511 CU_ASSERT(detect_remove == true); 1512 1513 ctrlr.is_removed = false; 1514 1515 spdk_put_io_channel(ch2); 1516 1517 set_thread(0); 1518 1519 spdk_put_io_channel(ch1); 1520 1521 poll_threads(); 1522 1523 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1524 CU_ASSERT(rc == 0); 1525 1526 poll_threads(); 1527 spdk_delay_us(1000); 1528 poll_threads(); 1529 1530 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1531 } 1532 1533 static void 1534 test_race_between_reset_and_destruct_ctrlr(void) 1535 { 1536 struct spdk_nvme_transport_id trid = {}; 1537 struct spdk_nvme_ctrlr ctrlr = {}; 1538 struct nvme_ctrlr *nvme_ctrlr; 1539 struct spdk_io_channel *ch1, *ch2; 1540 int rc; 1541 1542 ut_init_trid(&trid); 1543 TAILQ_INIT(&ctrlr.active_io_qpairs); 1544 1545 set_thread(0); 1546 1547 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1548 CU_ASSERT(rc == 0); 1549 1550 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1551 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1552 1553 ch1 = spdk_get_io_channel(nvme_ctrlr); 1554 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1555 1556 set_thread(1); 1557 1558 ch2 = spdk_get_io_channel(nvme_ctrlr); 1559 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1560 1561 /* Reset starts from thread 1. */ 1562 set_thread(1); 1563 1564 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1565 CU_ASSERT(rc == 0); 1566 CU_ASSERT(nvme_ctrlr->resetting == true); 1567 1568 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1569 set_thread(0); 1570 1571 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1572 CU_ASSERT(rc == 0); 1573 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1574 CU_ASSERT(nvme_ctrlr->destruct == true); 1575 CU_ASSERT(nvme_ctrlr->resetting == true); 1576 1577 poll_threads(); 1578 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1579 poll_threads(); 1580 1581 /* Reset completed but ctrlr is not still destructed yet. */ 1582 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1583 CU_ASSERT(nvme_ctrlr->destruct == true); 1584 CU_ASSERT(nvme_ctrlr->resetting == false); 1585 1586 /* New reset request is rejected. */ 1587 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1588 CU_ASSERT(rc == -ENXIO); 1589 1590 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1591 * However there are two channels and destruct is not completed yet. 1592 */ 1593 poll_threads(); 1594 1595 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1596 1597 set_thread(0); 1598 1599 spdk_put_io_channel(ch1); 1600 1601 set_thread(1); 1602 1603 spdk_put_io_channel(ch2); 1604 1605 poll_threads(); 1606 spdk_delay_us(1000); 1607 poll_threads(); 1608 1609 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1610 } 1611 1612 static void 1613 test_failover_ctrlr(void) 1614 { 1615 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1616 struct spdk_nvme_ctrlr ctrlr = {}; 1617 struct nvme_ctrlr *nvme_ctrlr = NULL; 1618 struct nvme_path_id *curr_trid, *next_trid; 1619 struct spdk_io_channel *ch1, *ch2; 1620 int rc; 1621 1622 ut_init_trid(&trid1); 1623 ut_init_trid2(&trid2); 1624 TAILQ_INIT(&ctrlr.active_io_qpairs); 1625 1626 set_thread(0); 1627 1628 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1629 CU_ASSERT(rc == 0); 1630 1631 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1632 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1633 1634 ch1 = spdk_get_io_channel(nvme_ctrlr); 1635 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1636 1637 set_thread(1); 1638 1639 ch2 = spdk_get_io_channel(nvme_ctrlr); 1640 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1641 1642 /* First, test one trid case. */ 1643 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1644 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1645 1646 /* Failover starts from thread 1. */ 1647 set_thread(1); 1648 1649 /* Case 1: ctrlr is already being destructed. */ 1650 nvme_ctrlr->destruct = true; 1651 1652 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false); 1653 CU_ASSERT(rc == -ENXIO); 1654 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1655 1656 /* Case 2: reset is in progress. */ 1657 nvme_ctrlr->destruct = false; 1658 nvme_ctrlr->resetting = true; 1659 1660 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false); 1661 CU_ASSERT(rc == -EINPROGRESS); 1662 1663 /* Case 3: reset completes successfully. */ 1664 nvme_ctrlr->resetting = false; 1665 1666 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false); 1667 CU_ASSERT(rc == 0); 1668 1669 CU_ASSERT(nvme_ctrlr->resetting == true); 1670 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1671 1672 poll_threads(); 1673 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1674 poll_threads(); 1675 1676 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1677 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1678 1679 CU_ASSERT(nvme_ctrlr->resetting == false); 1680 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1681 1682 set_thread(0); 1683 1684 /* Second, test two trids case. */ 1685 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1686 CU_ASSERT(rc == 0); 1687 1688 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1689 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1690 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1691 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1692 1693 /* Failover starts from thread 1. */ 1694 set_thread(1); 1695 1696 /* Case 4: reset is in progress. */ 1697 nvme_ctrlr->resetting = true; 1698 1699 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false); 1700 CU_ASSERT(rc == -EINPROGRESS); 1701 1702 /* Case 5: failover completes successfully. */ 1703 nvme_ctrlr->resetting = false; 1704 1705 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false); 1706 CU_ASSERT(rc == 0); 1707 1708 CU_ASSERT(nvme_ctrlr->resetting == true); 1709 1710 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1711 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1712 CU_ASSERT(next_trid != curr_trid); 1713 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1714 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1715 1716 poll_threads(); 1717 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1718 poll_threads(); 1719 1720 CU_ASSERT(nvme_ctrlr->resetting == false); 1721 1722 spdk_put_io_channel(ch2); 1723 1724 set_thread(0); 1725 1726 spdk_put_io_channel(ch1); 1727 1728 poll_threads(); 1729 1730 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1731 CU_ASSERT(rc == 0); 1732 1733 poll_threads(); 1734 spdk_delay_us(1000); 1735 poll_threads(); 1736 1737 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1738 } 1739 1740 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1741 * 1742 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1743 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1744 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1745 * have been active, i.e., the head of the list until the failover completed. 1746 * However trid3 was inserted to the head of the list by mistake. 1747 * 1748 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1749 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1750 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1751 * may be executed repeatedly before failover is executed. Hence this bug is real. 1752 * 1753 * The following test verifies the fix. 1754 */ 1755 static void 1756 test_race_between_failover_and_add_secondary_trid(void) 1757 { 1758 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1759 struct spdk_nvme_ctrlr ctrlr = {}; 1760 struct nvme_ctrlr *nvme_ctrlr = NULL; 1761 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1762 struct spdk_io_channel *ch1, *ch2; 1763 int rc; 1764 1765 ut_init_trid(&trid1); 1766 ut_init_trid2(&trid2); 1767 ut_init_trid3(&trid3); 1768 TAILQ_INIT(&ctrlr.active_io_qpairs); 1769 1770 set_thread(0); 1771 1772 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1773 CU_ASSERT(rc == 0); 1774 1775 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1776 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1777 1778 ch1 = spdk_get_io_channel(nvme_ctrlr); 1779 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1780 1781 set_thread(1); 1782 1783 ch2 = spdk_get_io_channel(nvme_ctrlr); 1784 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1785 1786 set_thread(0); 1787 1788 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1789 CU_ASSERT(rc == 0); 1790 1791 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1792 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1793 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1794 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1795 path_id2 = TAILQ_NEXT(path_id1, link); 1796 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1797 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1798 1799 ctrlr.fail_reset = true; 1800 1801 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1802 CU_ASSERT(rc == 0); 1803 1804 poll_threads(); 1805 1806 CU_ASSERT(path_id1->last_failed_tsc != 0); 1807 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1808 1809 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1810 CU_ASSERT(rc == 0); 1811 1812 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1813 CU_ASSERT(rc == 0); 1814 1815 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1816 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1817 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1818 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1819 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1820 path_id3 = TAILQ_NEXT(path_id2, link); 1821 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1822 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1823 1824 poll_threads(); 1825 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1826 poll_threads(); 1827 1828 spdk_put_io_channel(ch1); 1829 1830 set_thread(1); 1831 1832 spdk_put_io_channel(ch2); 1833 1834 poll_threads(); 1835 1836 set_thread(0); 1837 1838 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1839 CU_ASSERT(rc == 0); 1840 1841 poll_threads(); 1842 spdk_delay_us(1000); 1843 poll_threads(); 1844 1845 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1846 } 1847 1848 static void 1849 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1850 { 1851 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1852 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1853 } 1854 1855 static void 1856 test_pending_reset(void) 1857 { 1858 struct spdk_nvme_transport_id trid = {}; 1859 struct spdk_nvme_ctrlr *ctrlr; 1860 struct nvme_ctrlr *nvme_ctrlr = NULL; 1861 const int STRING_SIZE = 32; 1862 const char *attached_names[STRING_SIZE]; 1863 struct nvme_bdev *bdev; 1864 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1865 struct spdk_io_channel *ch1, *ch2; 1866 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1867 struct nvme_io_path *io_path1, *io_path2; 1868 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1869 int rc; 1870 1871 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1872 ut_init_trid(&trid); 1873 1874 set_thread(0); 1875 1876 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1877 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1878 1879 g_ut_attach_ctrlr_status = 0; 1880 g_ut_attach_bdev_count = 1; 1881 1882 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1883 attach_ctrlr_done, NULL, NULL, NULL, false); 1884 CU_ASSERT(rc == 0); 1885 1886 spdk_delay_us(1000); 1887 poll_threads(); 1888 1889 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1890 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1891 1892 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1893 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1894 1895 ch1 = spdk_get_io_channel(bdev); 1896 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1897 1898 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1899 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1900 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1901 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1902 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1903 1904 set_thread(1); 1905 1906 ch2 = spdk_get_io_channel(bdev); 1907 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1908 1909 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1910 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1911 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1912 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1913 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1914 1915 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1916 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1917 1918 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1919 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1920 1921 /* The first reset request is submitted on thread 1, and the second reset request 1922 * is submitted on thread 0 while processing the first request. 1923 */ 1924 bdev_nvme_submit_request(ch2, first_bdev_io); 1925 CU_ASSERT(nvme_ctrlr->resetting == true); 1926 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1927 1928 set_thread(0); 1929 1930 bdev_nvme_submit_request(ch1, second_bdev_io); 1931 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1932 1933 poll_threads(); 1934 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1935 poll_threads(); 1936 1937 CU_ASSERT(nvme_ctrlr->resetting == false); 1938 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1939 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1940 1941 /* The first reset request is submitted on thread 1, and the second reset request 1942 * is submitted on thread 0 while processing the first request. 1943 * 1944 * The difference from the above scenario is that the controller is removed while 1945 * processing the first request. Hence both reset requests should fail. 1946 */ 1947 set_thread(1); 1948 1949 bdev_nvme_submit_request(ch2, first_bdev_io); 1950 CU_ASSERT(nvme_ctrlr->resetting == true); 1951 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1952 1953 set_thread(0); 1954 1955 bdev_nvme_submit_request(ch1, second_bdev_io); 1956 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1957 1958 ctrlr->fail_reset = true; 1959 1960 poll_threads(); 1961 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1962 poll_threads(); 1963 1964 CU_ASSERT(nvme_ctrlr->resetting == false); 1965 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1966 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1967 1968 spdk_put_io_channel(ch1); 1969 1970 set_thread(1); 1971 1972 spdk_put_io_channel(ch2); 1973 1974 poll_threads(); 1975 1976 set_thread(0); 1977 1978 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1979 CU_ASSERT(rc == 0); 1980 1981 poll_threads(); 1982 spdk_delay_us(1000); 1983 poll_threads(); 1984 1985 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1986 1987 free(first_bdev_io); 1988 free(second_bdev_io); 1989 } 1990 1991 static void 1992 test_attach_ctrlr(void) 1993 { 1994 struct spdk_nvme_transport_id trid = {}; 1995 struct spdk_nvme_ctrlr *ctrlr; 1996 struct nvme_ctrlr *nvme_ctrlr; 1997 const int STRING_SIZE = 32; 1998 const char *attached_names[STRING_SIZE]; 1999 struct nvme_bdev *nbdev; 2000 int rc; 2001 2002 set_thread(0); 2003 2004 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2005 ut_init_trid(&trid); 2006 2007 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2008 * by probe polling. 2009 */ 2010 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2011 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2012 2013 ctrlr->is_failed = true; 2014 g_ut_attach_ctrlr_status = -EIO; 2015 g_ut_attach_bdev_count = 0; 2016 2017 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2018 attach_ctrlr_done, NULL, NULL, NULL, false); 2019 CU_ASSERT(rc == 0); 2020 2021 spdk_delay_us(1000); 2022 poll_threads(); 2023 2024 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2025 2026 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2027 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2028 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2029 2030 g_ut_attach_ctrlr_status = 0; 2031 2032 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2033 attach_ctrlr_done, NULL, NULL, NULL, false); 2034 CU_ASSERT(rc == 0); 2035 2036 spdk_delay_us(1000); 2037 poll_threads(); 2038 2039 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2040 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2041 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2042 2043 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2044 CU_ASSERT(rc == 0); 2045 2046 poll_threads(); 2047 spdk_delay_us(1000); 2048 poll_threads(); 2049 2050 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2051 2052 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2053 * one nvme_bdev is created. 2054 */ 2055 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2056 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2057 2058 g_ut_attach_bdev_count = 1; 2059 2060 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2061 attach_ctrlr_done, NULL, NULL, NULL, false); 2062 CU_ASSERT(rc == 0); 2063 2064 spdk_delay_us(1000); 2065 poll_threads(); 2066 2067 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2068 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2069 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2070 2071 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2072 attached_names[0] = NULL; 2073 2074 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2075 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2076 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2077 2078 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2079 CU_ASSERT(rc == 0); 2080 2081 poll_threads(); 2082 spdk_delay_us(1000); 2083 poll_threads(); 2084 2085 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2086 2087 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2088 * created because creating one nvme_bdev failed. 2089 */ 2090 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2091 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2092 2093 g_ut_register_bdev_status = -EINVAL; 2094 g_ut_attach_bdev_count = 0; 2095 2096 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2097 attach_ctrlr_done, NULL, NULL, NULL, false); 2098 CU_ASSERT(rc == 0); 2099 2100 spdk_delay_us(1000); 2101 poll_threads(); 2102 2103 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2104 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2105 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2106 2107 CU_ASSERT(attached_names[0] == NULL); 2108 2109 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2110 CU_ASSERT(rc == 0); 2111 2112 poll_threads(); 2113 spdk_delay_us(1000); 2114 poll_threads(); 2115 2116 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2117 2118 g_ut_register_bdev_status = 0; 2119 } 2120 2121 static void 2122 test_aer_cb(void) 2123 { 2124 struct spdk_nvme_transport_id trid = {}; 2125 struct spdk_nvme_ctrlr *ctrlr; 2126 struct nvme_ctrlr *nvme_ctrlr; 2127 struct nvme_bdev *bdev; 2128 const int STRING_SIZE = 32; 2129 const char *attached_names[STRING_SIZE]; 2130 union spdk_nvme_async_event_completion event = {}; 2131 struct spdk_nvme_cpl cpl = {}; 2132 int rc; 2133 2134 set_thread(0); 2135 2136 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2137 ut_init_trid(&trid); 2138 2139 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2140 * namespaces are populated. 2141 */ 2142 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2143 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2144 2145 ctrlr->ns[0].is_active = false; 2146 2147 g_ut_attach_ctrlr_status = 0; 2148 g_ut_attach_bdev_count = 3; 2149 2150 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2151 attach_ctrlr_done, NULL, NULL, NULL, false); 2152 CU_ASSERT(rc == 0); 2153 2154 spdk_delay_us(1000); 2155 poll_threads(); 2156 2157 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2158 poll_threads(); 2159 2160 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2161 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2162 2163 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2164 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2165 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2166 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2167 2168 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2169 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2170 CU_ASSERT(bdev->disk.blockcnt == 1024); 2171 2172 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2173 * change the size of the 4th namespace. 2174 */ 2175 ctrlr->ns[0].is_active = true; 2176 ctrlr->ns[2].is_active = false; 2177 ctrlr->nsdata[3].nsze = 2048; 2178 2179 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2180 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2181 cpl.cdw0 = event.raw; 2182 2183 aer_cb(nvme_ctrlr, &cpl); 2184 2185 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2186 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2187 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2188 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2189 CU_ASSERT(bdev->disk.blockcnt == 2048); 2190 2191 /* Change ANA state of active namespaces. */ 2192 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2193 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2194 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2195 2196 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2197 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2198 cpl.cdw0 = event.raw; 2199 2200 aer_cb(nvme_ctrlr, &cpl); 2201 2202 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2203 poll_threads(); 2204 2205 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2206 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2207 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2208 2209 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2210 CU_ASSERT(rc == 0); 2211 2212 poll_threads(); 2213 spdk_delay_us(1000); 2214 poll_threads(); 2215 2216 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2217 } 2218 2219 static void 2220 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2221 enum spdk_bdev_io_type io_type) 2222 { 2223 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2224 struct nvme_io_path *io_path; 2225 struct spdk_nvme_qpair *qpair; 2226 2227 io_path = bdev_nvme_find_io_path(nbdev_ch); 2228 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2229 qpair = io_path->qpair->qpair; 2230 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2231 2232 bdev_io->type = io_type; 2233 bdev_io->internal.in_submit_request = true; 2234 2235 bdev_nvme_submit_request(ch, bdev_io); 2236 2237 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2238 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2239 2240 poll_threads(); 2241 2242 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2243 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2244 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2245 } 2246 2247 static void 2248 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2249 enum spdk_bdev_io_type io_type) 2250 { 2251 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2252 struct nvme_io_path *io_path; 2253 struct spdk_nvme_qpair *qpair; 2254 2255 io_path = bdev_nvme_find_io_path(nbdev_ch); 2256 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2257 qpair = io_path->qpair->qpair; 2258 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2259 2260 bdev_io->type = io_type; 2261 bdev_io->internal.in_submit_request = true; 2262 2263 bdev_nvme_submit_request(ch, bdev_io); 2264 2265 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2266 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2267 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2268 } 2269 2270 static void 2271 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2272 { 2273 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2274 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2275 struct ut_nvme_req *req; 2276 struct nvme_io_path *io_path; 2277 struct spdk_nvme_qpair *qpair; 2278 2279 io_path = bdev_nvme_find_io_path(nbdev_ch); 2280 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2281 qpair = io_path->qpair->qpair; 2282 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2283 2284 /* Only compare and write now. */ 2285 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2286 bdev_io->internal.in_submit_request = true; 2287 2288 bdev_nvme_submit_request(ch, bdev_io); 2289 2290 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2291 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2292 CU_ASSERT(bio->first_fused_submitted == true); 2293 2294 /* First outstanding request is compare operation. */ 2295 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2296 SPDK_CU_ASSERT_FATAL(req != NULL); 2297 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2298 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2299 2300 poll_threads(); 2301 2302 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2303 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2304 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2305 } 2306 2307 static void 2308 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2309 struct spdk_nvme_ctrlr *ctrlr) 2310 { 2311 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2312 bdev_io->internal.in_submit_request = true; 2313 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2314 2315 bdev_nvme_submit_request(ch, bdev_io); 2316 2317 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2318 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2319 2320 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2321 poll_thread_times(1, 1); 2322 2323 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2324 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2325 2326 poll_thread_times(0, 1); 2327 2328 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2329 } 2330 2331 static void 2332 test_submit_nvme_cmd(void) 2333 { 2334 struct spdk_nvme_transport_id trid = {}; 2335 struct spdk_nvme_ctrlr *ctrlr; 2336 struct nvme_ctrlr *nvme_ctrlr; 2337 const int STRING_SIZE = 32; 2338 const char *attached_names[STRING_SIZE]; 2339 struct nvme_bdev *bdev; 2340 struct spdk_bdev_io *bdev_io; 2341 struct spdk_io_channel *ch; 2342 int rc; 2343 2344 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2345 ut_init_trid(&trid); 2346 2347 set_thread(1); 2348 2349 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2350 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2351 2352 g_ut_attach_ctrlr_status = 0; 2353 g_ut_attach_bdev_count = 1; 2354 2355 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2356 attach_ctrlr_done, NULL, NULL, NULL, false); 2357 CU_ASSERT(rc == 0); 2358 2359 spdk_delay_us(1000); 2360 poll_threads(); 2361 2362 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2363 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2364 2365 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2366 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2367 2368 set_thread(0); 2369 2370 ch = spdk_get_io_channel(bdev); 2371 SPDK_CU_ASSERT_FATAL(ch != NULL); 2372 2373 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2374 2375 bdev_io->u.bdev.iovs = NULL; 2376 2377 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2378 2379 ut_bdev_io_set_buf(bdev_io); 2380 2381 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2382 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2383 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2384 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2385 2386 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2387 2388 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2389 2390 /* Verify that ext NVME API is called when data is described by memory domain */ 2391 g_ut_readv_ext_called = false; 2392 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2393 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2394 CU_ASSERT(g_ut_readv_ext_called == true); 2395 g_ut_readv_ext_called = false; 2396 bdev_io->u.bdev.memory_domain = NULL; 2397 2398 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2399 2400 free(bdev_io); 2401 2402 spdk_put_io_channel(ch); 2403 2404 poll_threads(); 2405 2406 set_thread(1); 2407 2408 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2409 CU_ASSERT(rc == 0); 2410 2411 poll_threads(); 2412 spdk_delay_us(1000); 2413 poll_threads(); 2414 2415 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2416 } 2417 2418 static void 2419 test_add_remove_trid(void) 2420 { 2421 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2422 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2423 struct nvme_ctrlr *nvme_ctrlr = NULL; 2424 const int STRING_SIZE = 32; 2425 const char *attached_names[STRING_SIZE]; 2426 struct nvme_path_id *ctrid; 2427 int rc; 2428 2429 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2430 ut_init_trid(&path1.trid); 2431 ut_init_trid2(&path2.trid); 2432 ut_init_trid3(&path3.trid); 2433 2434 set_thread(0); 2435 2436 g_ut_attach_ctrlr_status = 0; 2437 g_ut_attach_bdev_count = 0; 2438 2439 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2440 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2441 2442 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2443 attach_ctrlr_done, NULL, NULL, NULL, false); 2444 CU_ASSERT(rc == 0); 2445 2446 spdk_delay_us(1000); 2447 poll_threads(); 2448 2449 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2450 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2451 2452 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2453 2454 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2455 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2456 2457 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2458 attach_ctrlr_done, NULL, NULL, NULL, false); 2459 CU_ASSERT(rc == 0); 2460 2461 spdk_delay_us(1000); 2462 poll_threads(); 2463 2464 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2465 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2466 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2467 break; 2468 } 2469 } 2470 CU_ASSERT(ctrid != NULL); 2471 2472 /* trid3 is not in the registered list. */ 2473 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2474 CU_ASSERT(rc == -ENXIO); 2475 2476 /* trid2 is not used, and simply removed. */ 2477 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2478 CU_ASSERT(rc == 0); 2479 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2480 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2481 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2482 } 2483 2484 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2485 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2486 2487 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2488 attach_ctrlr_done, NULL, NULL, NULL, false); 2489 CU_ASSERT(rc == 0); 2490 2491 spdk_delay_us(1000); 2492 poll_threads(); 2493 2494 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2495 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2496 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2497 break; 2498 } 2499 } 2500 CU_ASSERT(ctrid != NULL); 2501 2502 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2503 * If we add path2 again, path2 should be inserted between path1 and path3. 2504 * Then, we remove path2. It is not used, and simply removed. 2505 */ 2506 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2507 2508 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2509 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2510 2511 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2512 attach_ctrlr_done, NULL, NULL, NULL, false); 2513 CU_ASSERT(rc == 0); 2514 2515 spdk_delay_us(1000); 2516 poll_threads(); 2517 2518 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2519 2520 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2521 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2522 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2523 2524 ctrid = TAILQ_NEXT(ctrid, link); 2525 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2526 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2527 2528 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2529 CU_ASSERT(rc == 0); 2530 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2531 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2532 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2533 } 2534 2535 /* path1 is currently used and path3 is an alternative path. 2536 * If we remove path1, path is changed to path3. 2537 */ 2538 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2539 CU_ASSERT(rc == 0); 2540 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2541 CU_ASSERT(nvme_ctrlr->resetting == true); 2542 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2543 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2544 } 2545 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2546 2547 poll_threads(); 2548 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2549 poll_threads(); 2550 2551 CU_ASSERT(nvme_ctrlr->resetting == false); 2552 2553 /* path3 is the current and only path. If we remove path3, the corresponding 2554 * nvme_ctrlr is removed. 2555 */ 2556 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2557 CU_ASSERT(rc == 0); 2558 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2559 2560 poll_threads(); 2561 spdk_delay_us(1000); 2562 poll_threads(); 2563 2564 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2565 2566 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2567 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2568 2569 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2570 attach_ctrlr_done, NULL, NULL, NULL, false); 2571 CU_ASSERT(rc == 0); 2572 2573 spdk_delay_us(1000); 2574 poll_threads(); 2575 2576 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2577 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2578 2579 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2580 2581 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2582 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2583 2584 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2585 attach_ctrlr_done, NULL, NULL, NULL, false); 2586 CU_ASSERT(rc == 0); 2587 2588 spdk_delay_us(1000); 2589 poll_threads(); 2590 2591 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2592 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2593 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2594 break; 2595 } 2596 } 2597 CU_ASSERT(ctrid != NULL); 2598 2599 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2600 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2601 CU_ASSERT(rc == 0); 2602 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2603 2604 poll_threads(); 2605 spdk_delay_us(1000); 2606 poll_threads(); 2607 2608 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2609 } 2610 2611 static void 2612 test_abort(void) 2613 { 2614 struct spdk_nvme_transport_id trid = {}; 2615 struct nvme_ctrlr_opts opts = {}; 2616 struct spdk_nvme_ctrlr *ctrlr; 2617 struct nvme_ctrlr *nvme_ctrlr; 2618 const int STRING_SIZE = 32; 2619 const char *attached_names[STRING_SIZE]; 2620 struct nvme_bdev *bdev; 2621 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2622 struct spdk_io_channel *ch1, *ch2; 2623 struct nvme_bdev_channel *nbdev_ch1; 2624 struct nvme_io_path *io_path1; 2625 struct nvme_qpair *nvme_qpair1; 2626 int rc; 2627 2628 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2629 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2630 * are submitted on thread 1. Both should succeed. 2631 */ 2632 2633 ut_init_trid(&trid); 2634 2635 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2636 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2637 2638 g_ut_attach_ctrlr_status = 0; 2639 g_ut_attach_bdev_count = 1; 2640 2641 set_thread(1); 2642 2643 opts.ctrlr_loss_timeout_sec = -1; 2644 opts.reconnect_delay_sec = 1; 2645 2646 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2647 attach_ctrlr_done, NULL, NULL, &opts, false); 2648 CU_ASSERT(rc == 0); 2649 2650 spdk_delay_us(1000); 2651 poll_threads(); 2652 2653 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2654 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2655 2656 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2657 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2658 2659 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2660 ut_bdev_io_set_buf(write_io); 2661 2662 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2663 ut_bdev_io_set_buf(fuse_io); 2664 2665 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2666 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2667 2668 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2669 2670 set_thread(0); 2671 2672 ch1 = spdk_get_io_channel(bdev); 2673 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2674 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2675 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2676 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2677 nvme_qpair1 = io_path1->qpair; 2678 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2679 2680 set_thread(1); 2681 2682 ch2 = spdk_get_io_channel(bdev); 2683 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2684 2685 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2686 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2687 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2688 2689 /* Aborting the already completed request should fail. */ 2690 write_io->internal.in_submit_request = true; 2691 bdev_nvme_submit_request(ch1, write_io); 2692 poll_threads(); 2693 2694 CU_ASSERT(write_io->internal.in_submit_request == false); 2695 2696 abort_io->u.abort.bio_to_abort = write_io; 2697 abort_io->internal.in_submit_request = true; 2698 2699 bdev_nvme_submit_request(ch1, abort_io); 2700 2701 poll_threads(); 2702 2703 CU_ASSERT(abort_io->internal.in_submit_request == false); 2704 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2705 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2706 2707 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2708 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2709 2710 admin_io->internal.in_submit_request = true; 2711 bdev_nvme_submit_request(ch1, admin_io); 2712 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2713 poll_threads(); 2714 2715 CU_ASSERT(admin_io->internal.in_submit_request == false); 2716 2717 abort_io->u.abort.bio_to_abort = admin_io; 2718 abort_io->internal.in_submit_request = true; 2719 2720 bdev_nvme_submit_request(ch2, abort_io); 2721 2722 poll_threads(); 2723 2724 CU_ASSERT(abort_io->internal.in_submit_request == false); 2725 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2726 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2727 2728 /* Aborting the write request should succeed. */ 2729 write_io->internal.in_submit_request = true; 2730 bdev_nvme_submit_request(ch1, write_io); 2731 2732 CU_ASSERT(write_io->internal.in_submit_request == true); 2733 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2734 2735 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2736 abort_io->u.abort.bio_to_abort = write_io; 2737 abort_io->internal.in_submit_request = true; 2738 2739 bdev_nvme_submit_request(ch1, abort_io); 2740 2741 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2742 poll_threads(); 2743 2744 CU_ASSERT(abort_io->internal.in_submit_request == false); 2745 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2746 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2747 CU_ASSERT(write_io->internal.in_submit_request == false); 2748 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2749 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2750 2751 /* Aborting the fuse request should succeed. */ 2752 fuse_io->internal.in_submit_request = true; 2753 bdev_nvme_submit_request(ch1, fuse_io); 2754 2755 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2756 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2757 2758 abort_io->u.abort.bio_to_abort = fuse_io; 2759 abort_io->internal.in_submit_request = true; 2760 2761 bdev_nvme_submit_request(ch1, abort_io); 2762 2763 spdk_delay_us(10000); 2764 poll_threads(); 2765 2766 CU_ASSERT(abort_io->internal.in_submit_request == false); 2767 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2768 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2769 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2770 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2771 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2772 2773 /* Aborting the admin request should succeed. */ 2774 admin_io->internal.in_submit_request = true; 2775 bdev_nvme_submit_request(ch1, admin_io); 2776 2777 CU_ASSERT(admin_io->internal.in_submit_request == true); 2778 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2779 2780 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2781 abort_io->u.abort.bio_to_abort = admin_io; 2782 abort_io->internal.in_submit_request = true; 2783 2784 bdev_nvme_submit_request(ch2, abort_io); 2785 2786 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2787 poll_threads(); 2788 2789 CU_ASSERT(abort_io->internal.in_submit_request == false); 2790 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2791 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2792 CU_ASSERT(admin_io->internal.in_submit_request == false); 2793 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2794 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2795 2796 set_thread(0); 2797 2798 /* If qpair is disconnected, it is freed and then reconnected via resetting 2799 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2800 * while resetting the nvme_ctrlr. 2801 */ 2802 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2803 2804 poll_thread_times(0, 3); 2805 2806 CU_ASSERT(nvme_qpair1->qpair == NULL); 2807 CU_ASSERT(nvme_ctrlr->resetting == true); 2808 2809 write_io->internal.in_submit_request = true; 2810 2811 bdev_nvme_submit_request(ch1, write_io); 2812 2813 CU_ASSERT(write_io->internal.in_submit_request == true); 2814 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2815 2816 /* Aborting the queued write request should succeed immediately. */ 2817 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2818 abort_io->u.abort.bio_to_abort = write_io; 2819 abort_io->internal.in_submit_request = true; 2820 2821 bdev_nvme_submit_request(ch1, abort_io); 2822 2823 CU_ASSERT(abort_io->internal.in_submit_request == false); 2824 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2825 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2826 CU_ASSERT(write_io->internal.in_submit_request == false); 2827 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2828 2829 poll_threads(); 2830 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2831 poll_threads(); 2832 2833 spdk_put_io_channel(ch1); 2834 2835 set_thread(1); 2836 2837 spdk_put_io_channel(ch2); 2838 2839 poll_threads(); 2840 2841 free(write_io); 2842 free(fuse_io); 2843 free(admin_io); 2844 free(abort_io); 2845 2846 set_thread(1); 2847 2848 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2849 CU_ASSERT(rc == 0); 2850 2851 poll_threads(); 2852 spdk_delay_us(1000); 2853 poll_threads(); 2854 2855 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2856 } 2857 2858 static void 2859 test_get_io_qpair(void) 2860 { 2861 struct spdk_nvme_transport_id trid = {}; 2862 struct spdk_nvme_ctrlr ctrlr = {}; 2863 struct nvme_ctrlr *nvme_ctrlr = NULL; 2864 struct spdk_io_channel *ch; 2865 struct nvme_ctrlr_channel *ctrlr_ch; 2866 struct spdk_nvme_qpair *qpair; 2867 int rc; 2868 2869 ut_init_trid(&trid); 2870 TAILQ_INIT(&ctrlr.active_io_qpairs); 2871 2872 set_thread(0); 2873 2874 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2875 CU_ASSERT(rc == 0); 2876 2877 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2878 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2879 2880 ch = spdk_get_io_channel(nvme_ctrlr); 2881 SPDK_CU_ASSERT_FATAL(ch != NULL); 2882 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2883 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2884 2885 qpair = bdev_nvme_get_io_qpair(ch); 2886 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2887 2888 spdk_put_io_channel(ch); 2889 2890 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2891 CU_ASSERT(rc == 0); 2892 2893 poll_threads(); 2894 spdk_delay_us(1000); 2895 poll_threads(); 2896 2897 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2898 } 2899 2900 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2901 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2902 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2903 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2904 */ 2905 static void 2906 test_bdev_unregister(void) 2907 { 2908 struct spdk_nvme_transport_id trid = {}; 2909 struct spdk_nvme_ctrlr *ctrlr; 2910 struct nvme_ctrlr *nvme_ctrlr; 2911 struct nvme_ns *nvme_ns1, *nvme_ns2; 2912 const int STRING_SIZE = 32; 2913 const char *attached_names[STRING_SIZE]; 2914 struct nvme_bdev *bdev1, *bdev2; 2915 int rc; 2916 2917 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2918 ut_init_trid(&trid); 2919 2920 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2921 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2922 2923 g_ut_attach_ctrlr_status = 0; 2924 g_ut_attach_bdev_count = 2; 2925 2926 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2927 attach_ctrlr_done, NULL, NULL, NULL, false); 2928 CU_ASSERT(rc == 0); 2929 2930 spdk_delay_us(1000); 2931 poll_threads(); 2932 2933 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2934 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2935 2936 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2937 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2938 2939 bdev1 = nvme_ns1->bdev; 2940 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2941 2942 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2943 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2944 2945 bdev2 = nvme_ns2->bdev; 2946 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2947 2948 bdev_nvme_destruct(&bdev1->disk); 2949 bdev_nvme_destruct(&bdev2->disk); 2950 2951 poll_threads(); 2952 2953 CU_ASSERT(nvme_ns1->bdev == NULL); 2954 CU_ASSERT(nvme_ns2->bdev == NULL); 2955 2956 nvme_ctrlr->destruct = true; 2957 _nvme_ctrlr_destruct(nvme_ctrlr); 2958 2959 poll_threads(); 2960 spdk_delay_us(1000); 2961 poll_threads(); 2962 2963 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2964 } 2965 2966 static void 2967 test_compare_ns(void) 2968 { 2969 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 2970 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 2971 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 2972 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 2973 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 2974 2975 /* No IDs are defined. */ 2976 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2977 2978 /* Only EUI64 are defined and not matched. */ 2979 nsdata1.eui64 = 0xABCDEF0123456789; 2980 nsdata2.eui64 = 0xBBCDEF0123456789; 2981 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2982 2983 /* Only EUI64 are defined and matched. */ 2984 nsdata2.eui64 = 0xABCDEF0123456789; 2985 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2986 2987 /* Only NGUID are defined and not matched. */ 2988 nsdata1.eui64 = 0x0; 2989 nsdata2.eui64 = 0x0; 2990 nsdata1.nguid[0] = 0x12; 2991 nsdata2.nguid[0] = 0x10; 2992 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2993 2994 /* Only NGUID are defined and matched. */ 2995 nsdata2.nguid[0] = 0x12; 2996 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2997 2998 /* Only UUID are defined and not matched. */ 2999 nsdata1.nguid[0] = 0x0; 3000 nsdata2.nguid[0] = 0x0; 3001 ns1.uuid = &uuid1; 3002 ns2.uuid = &uuid2; 3003 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3004 3005 /* Only one UUID is defined. */ 3006 ns1.uuid = NULL; 3007 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3008 3009 /* Only UUID are defined and matched. */ 3010 ns1.uuid = &uuid2; 3011 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3012 3013 /* All EUI64, NGUID, and UUID are defined and matched. */ 3014 nsdata1.eui64 = 0x123456789ABCDEF; 3015 nsdata2.eui64 = 0x123456789ABCDEF; 3016 nsdata1.nguid[15] = 0x34; 3017 nsdata2.nguid[15] = 0x34; 3018 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3019 3020 /* CSI are not matched. */ 3021 ns1.csi = SPDK_NVME_CSI_ZNS; 3022 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3023 } 3024 3025 static void 3026 test_init_ana_log_page(void) 3027 { 3028 struct spdk_nvme_transport_id trid = {}; 3029 struct spdk_nvme_ctrlr *ctrlr; 3030 struct nvme_ctrlr *nvme_ctrlr; 3031 const int STRING_SIZE = 32; 3032 const char *attached_names[STRING_SIZE]; 3033 int rc; 3034 3035 set_thread(0); 3036 3037 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3038 ut_init_trid(&trid); 3039 3040 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3041 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3042 3043 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3044 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3045 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3046 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3047 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3048 3049 g_ut_attach_ctrlr_status = 0; 3050 g_ut_attach_bdev_count = 5; 3051 3052 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3053 attach_ctrlr_done, NULL, NULL, NULL, false); 3054 CU_ASSERT(rc == 0); 3055 3056 spdk_delay_us(1000); 3057 poll_threads(); 3058 3059 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3060 poll_threads(); 3061 3062 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3063 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3064 3065 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3066 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3067 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3068 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3069 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3070 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3071 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3072 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3073 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3074 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3075 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3076 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3077 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3078 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3079 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3080 3081 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3082 CU_ASSERT(rc == 0); 3083 3084 poll_threads(); 3085 spdk_delay_us(1000); 3086 poll_threads(); 3087 3088 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3089 } 3090 3091 static void 3092 init_accel(void) 3093 { 3094 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3095 sizeof(int), "accel_p"); 3096 } 3097 3098 static void 3099 fini_accel(void) 3100 { 3101 spdk_io_device_unregister(g_accel_p, NULL); 3102 } 3103 3104 static void 3105 test_get_memory_domains(void) 3106 { 3107 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3108 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3109 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3110 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3111 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3112 struct spdk_memory_domain *domains[4] = {}; 3113 int rc = 0; 3114 3115 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3116 3117 /* nvme controller doesn't have memory domains */ 3118 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3119 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3120 CU_ASSERT(rc == 0); 3121 CU_ASSERT(domains[0] == NULL); 3122 CU_ASSERT(domains[1] == NULL); 3123 3124 /* nvme controller has a memory domain */ 3125 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3126 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3127 CU_ASSERT(rc == 1); 3128 CU_ASSERT(domains[0] != NULL); 3129 memset(domains, 0, sizeof(domains)); 3130 3131 /* multipath, 2 controllers report 1 memory domain each */ 3132 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3133 3134 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3135 CU_ASSERT(rc == 2); 3136 CU_ASSERT(domains[0] != NULL); 3137 CU_ASSERT(domains[1] != NULL); 3138 memset(domains, 0, sizeof(domains)); 3139 3140 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3141 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3142 CU_ASSERT(rc == 2); 3143 3144 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3145 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3146 CU_ASSERT(rc == 2); 3147 CU_ASSERT(domains[0] == NULL); 3148 CU_ASSERT(domains[1] == NULL); 3149 3150 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3151 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3152 CU_ASSERT(rc == 2); 3153 CU_ASSERT(domains[0] != NULL); 3154 CU_ASSERT(domains[1] == NULL); 3155 memset(domains, 0, sizeof(domains)); 3156 3157 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3158 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3159 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3160 CU_ASSERT(rc == 4); 3161 CU_ASSERT(domains[0] != NULL); 3162 CU_ASSERT(domains[1] != NULL); 3163 CU_ASSERT(domains[2] != NULL); 3164 CU_ASSERT(domains[3] != NULL); 3165 memset(domains, 0, sizeof(domains)); 3166 3167 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3168 * Array size is less than the number of memory domains */ 3169 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3170 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3171 CU_ASSERT(rc == 4); 3172 CU_ASSERT(domains[0] != NULL); 3173 CU_ASSERT(domains[1] != NULL); 3174 CU_ASSERT(domains[2] != NULL); 3175 CU_ASSERT(domains[3] == NULL); 3176 memset(domains, 0, sizeof(domains)); 3177 3178 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3179 } 3180 3181 static void 3182 test_reconnect_qpair(void) 3183 { 3184 struct spdk_nvme_transport_id trid = {}; 3185 struct spdk_nvme_ctrlr *ctrlr; 3186 struct nvme_ctrlr *nvme_ctrlr; 3187 const int STRING_SIZE = 32; 3188 const char *attached_names[STRING_SIZE]; 3189 struct nvme_bdev *bdev; 3190 struct spdk_io_channel *ch1, *ch2; 3191 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3192 struct nvme_io_path *io_path1, *io_path2; 3193 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3194 int rc; 3195 3196 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3197 ut_init_trid(&trid); 3198 3199 set_thread(0); 3200 3201 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3202 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3203 3204 g_ut_attach_ctrlr_status = 0; 3205 g_ut_attach_bdev_count = 1; 3206 3207 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3208 attach_ctrlr_done, NULL, NULL, NULL, false); 3209 CU_ASSERT(rc == 0); 3210 3211 spdk_delay_us(1000); 3212 poll_threads(); 3213 3214 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3215 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3216 3217 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3218 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3219 3220 ch1 = spdk_get_io_channel(bdev); 3221 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3222 3223 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3224 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3225 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3226 nvme_qpair1 = io_path1->qpair; 3227 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3228 3229 set_thread(1); 3230 3231 ch2 = spdk_get_io_channel(bdev); 3232 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3233 3234 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3235 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3236 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3237 nvme_qpair2 = io_path2->qpair; 3238 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3239 3240 /* If a qpair is disconnected, it is freed and then reconnected via 3241 * resetting the corresponding nvme_ctrlr. 3242 */ 3243 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3244 ctrlr->is_failed = true; 3245 3246 poll_thread_times(1, 3); 3247 CU_ASSERT(nvme_qpair1->qpair != NULL); 3248 CU_ASSERT(nvme_qpair2->qpair == NULL); 3249 CU_ASSERT(nvme_ctrlr->resetting == true); 3250 3251 poll_thread_times(0, 3); 3252 CU_ASSERT(nvme_qpair1->qpair == NULL); 3253 CU_ASSERT(nvme_qpair2->qpair == NULL); 3254 CU_ASSERT(ctrlr->is_failed == true); 3255 3256 poll_thread_times(1, 2); 3257 poll_thread_times(0, 1); 3258 CU_ASSERT(ctrlr->is_failed == false); 3259 CU_ASSERT(ctrlr->adminq.is_connected == false); 3260 3261 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3262 poll_thread_times(0, 2); 3263 CU_ASSERT(ctrlr->adminq.is_connected == true); 3264 3265 poll_thread_times(0, 1); 3266 poll_thread_times(1, 1); 3267 CU_ASSERT(nvme_qpair1->qpair != NULL); 3268 CU_ASSERT(nvme_qpair2->qpair != NULL); 3269 CU_ASSERT(nvme_ctrlr->resetting == true); 3270 3271 poll_thread_times(0, 2); 3272 poll_thread_times(1, 1); 3273 poll_thread_times(0, 1); 3274 CU_ASSERT(nvme_ctrlr->resetting == false); 3275 3276 poll_threads(); 3277 3278 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3279 * fails, the qpair is just freed. 3280 */ 3281 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3282 ctrlr->is_failed = true; 3283 ctrlr->fail_reset = true; 3284 3285 poll_thread_times(1, 3); 3286 CU_ASSERT(nvme_qpair1->qpair != NULL); 3287 CU_ASSERT(nvme_qpair2->qpair == NULL); 3288 CU_ASSERT(nvme_ctrlr->resetting == true); 3289 3290 poll_thread_times(0, 3); 3291 poll_thread_times(1, 1); 3292 CU_ASSERT(nvme_qpair1->qpair == NULL); 3293 CU_ASSERT(nvme_qpair2->qpair == NULL); 3294 CU_ASSERT(ctrlr->is_failed == true); 3295 3296 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3297 poll_thread_times(0, 3); 3298 poll_thread_times(1, 1); 3299 poll_thread_times(0, 1); 3300 CU_ASSERT(ctrlr->is_failed == true); 3301 CU_ASSERT(nvme_ctrlr->resetting == false); 3302 CU_ASSERT(nvme_qpair1->qpair == NULL); 3303 CU_ASSERT(nvme_qpair2->qpair == NULL); 3304 3305 poll_threads(); 3306 3307 spdk_put_io_channel(ch2); 3308 3309 set_thread(0); 3310 3311 spdk_put_io_channel(ch1); 3312 3313 poll_threads(); 3314 3315 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3316 CU_ASSERT(rc == 0); 3317 3318 poll_threads(); 3319 spdk_delay_us(1000); 3320 poll_threads(); 3321 3322 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3323 } 3324 3325 static void 3326 test_create_bdev_ctrlr(void) 3327 { 3328 struct nvme_path_id path1 = {}, path2 = {}; 3329 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3330 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3331 const int STRING_SIZE = 32; 3332 const char *attached_names[STRING_SIZE]; 3333 int rc; 3334 3335 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3336 ut_init_trid(&path1.trid); 3337 ut_init_trid2(&path2.trid); 3338 3339 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3340 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3341 3342 g_ut_attach_ctrlr_status = 0; 3343 g_ut_attach_bdev_count = 0; 3344 3345 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3346 attach_ctrlr_done, NULL, NULL, NULL, true); 3347 CU_ASSERT(rc == 0); 3348 3349 spdk_delay_us(1000); 3350 poll_threads(); 3351 3352 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3353 poll_threads(); 3354 3355 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3356 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3357 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3358 3359 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3360 g_ut_attach_ctrlr_status = -EINVAL; 3361 3362 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3363 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3364 3365 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3366 3367 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3368 attach_ctrlr_done, NULL, NULL, NULL, true); 3369 CU_ASSERT(rc == 0); 3370 3371 spdk_delay_us(1000); 3372 poll_threads(); 3373 3374 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3375 poll_threads(); 3376 3377 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3378 3379 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3380 g_ut_attach_ctrlr_status = 0; 3381 3382 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3383 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3384 3385 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3386 attach_ctrlr_done, NULL, NULL, NULL, true); 3387 CU_ASSERT(rc == 0); 3388 3389 spdk_delay_us(1000); 3390 poll_threads(); 3391 3392 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3393 poll_threads(); 3394 3395 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3396 3397 /* Delete two ctrlrs at once. */ 3398 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3399 CU_ASSERT(rc == 0); 3400 3401 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3402 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3403 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3404 3405 poll_threads(); 3406 spdk_delay_us(1000); 3407 poll_threads(); 3408 3409 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3410 3411 /* Add two ctrlrs and delete one by one. */ 3412 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3413 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3414 3415 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3416 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3417 3418 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3419 attach_ctrlr_done, NULL, NULL, NULL, true); 3420 CU_ASSERT(rc == 0); 3421 3422 spdk_delay_us(1000); 3423 poll_threads(); 3424 3425 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3426 poll_threads(); 3427 3428 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3429 attach_ctrlr_done, NULL, NULL, NULL, true); 3430 CU_ASSERT(rc == 0); 3431 3432 spdk_delay_us(1000); 3433 poll_threads(); 3434 3435 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3436 poll_threads(); 3437 3438 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3439 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3440 3441 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3442 CU_ASSERT(rc == 0); 3443 3444 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3445 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3446 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3447 3448 poll_threads(); 3449 spdk_delay_us(1000); 3450 poll_threads(); 3451 3452 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3453 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3454 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3455 3456 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3457 CU_ASSERT(rc == 0); 3458 3459 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3460 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3461 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3462 3463 poll_threads(); 3464 spdk_delay_us(1000); 3465 poll_threads(); 3466 3467 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3468 } 3469 3470 static struct nvme_ns * 3471 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3472 { 3473 struct nvme_ns *nvme_ns; 3474 3475 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3476 if (nvme_ns->ctrlr == nvme_ctrlr) { 3477 return nvme_ns; 3478 } 3479 } 3480 3481 return NULL; 3482 } 3483 3484 static void 3485 test_add_multi_ns_to_bdev(void) 3486 { 3487 struct nvme_path_id path1 = {}, path2 = {}; 3488 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3489 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3490 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3491 struct nvme_ns *nvme_ns1, *nvme_ns2; 3492 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3493 const int STRING_SIZE = 32; 3494 const char *attached_names[STRING_SIZE]; 3495 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3496 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3497 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3498 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3499 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3500 int rc; 3501 3502 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3503 ut_init_trid(&path1.trid); 3504 ut_init_trid2(&path2.trid); 3505 3506 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3507 3508 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3509 * namespaces are populated. 3510 */ 3511 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3512 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3513 3514 ctrlr1->ns[1].is_active = false; 3515 ctrlr1->ns[4].is_active = false; 3516 ctrlr1->ns[0].uuid = &uuid1; 3517 ctrlr1->ns[2].uuid = &uuid3; 3518 ctrlr1->ns[3].uuid = &uuid4; 3519 3520 g_ut_attach_ctrlr_status = 0; 3521 g_ut_attach_bdev_count = 3; 3522 3523 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3524 attach_ctrlr_done, NULL, NULL, NULL, true); 3525 CU_ASSERT(rc == 0); 3526 3527 spdk_delay_us(1000); 3528 poll_threads(); 3529 3530 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3531 poll_threads(); 3532 3533 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3534 * namespaces are populated. The uuid of 4th namespace is different, and hence 3535 * adding 4th namespace to a bdev should fail. 3536 */ 3537 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3538 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3539 3540 ctrlr2->ns[2].is_active = false; 3541 ctrlr2->ns[4].is_active = false; 3542 ctrlr2->ns[0].uuid = &uuid1; 3543 ctrlr2->ns[1].uuid = &uuid2; 3544 ctrlr2->ns[3].uuid = &uuid44; 3545 3546 g_ut_attach_ctrlr_status = 0; 3547 g_ut_attach_bdev_count = 2; 3548 3549 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3550 attach_ctrlr_done, NULL, NULL, NULL, true); 3551 CU_ASSERT(rc == 0); 3552 3553 spdk_delay_us(1000); 3554 poll_threads(); 3555 3556 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3557 poll_threads(); 3558 3559 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3560 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3561 3562 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3563 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3564 3565 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3566 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3567 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3568 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3569 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3570 3571 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3572 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3573 3574 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3575 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3576 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3577 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3578 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3579 3580 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3581 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3582 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3583 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3584 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3585 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3586 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3587 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3588 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3589 3590 CU_ASSERT(bdev1->ref == 2); 3591 CU_ASSERT(bdev2->ref == 1); 3592 CU_ASSERT(bdev3->ref == 1); 3593 CU_ASSERT(bdev4->ref == 1); 3594 3595 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3596 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3597 CU_ASSERT(rc == 0); 3598 3599 poll_threads(); 3600 spdk_delay_us(1000); 3601 poll_threads(); 3602 3603 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3604 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3605 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3606 3607 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3608 CU_ASSERT(rc == 0); 3609 3610 poll_threads(); 3611 spdk_delay_us(1000); 3612 poll_threads(); 3613 3614 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3615 3616 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3617 * can be deleted when the bdev subsystem shutdown. 3618 */ 3619 g_ut_attach_bdev_count = 1; 3620 3621 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3622 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3623 3624 ctrlr1->ns[0].uuid = &uuid1; 3625 3626 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3627 attach_ctrlr_done, NULL, NULL, NULL, true); 3628 CU_ASSERT(rc == 0); 3629 3630 spdk_delay_us(1000); 3631 poll_threads(); 3632 3633 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3634 poll_threads(); 3635 3636 ut_init_trid2(&path2.trid); 3637 3638 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3639 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3640 3641 ctrlr2->ns[0].uuid = &uuid1; 3642 3643 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3644 attach_ctrlr_done, NULL, NULL, NULL, true); 3645 CU_ASSERT(rc == 0); 3646 3647 spdk_delay_us(1000); 3648 poll_threads(); 3649 3650 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3651 poll_threads(); 3652 3653 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3654 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3655 3656 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3657 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3658 3659 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3660 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3661 3662 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3663 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3664 3665 /* Check if a nvme_bdev has two nvme_ns. */ 3666 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3667 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3668 CU_ASSERT(nvme_ns1->bdev == bdev1); 3669 3670 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3671 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3672 CU_ASSERT(nvme_ns2->bdev == bdev1); 3673 3674 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3675 bdev_nvme_destruct(&bdev1->disk); 3676 3677 poll_threads(); 3678 3679 CU_ASSERT(nvme_ns1->bdev == NULL); 3680 CU_ASSERT(nvme_ns2->bdev == NULL); 3681 3682 nvme_ctrlr1->destruct = true; 3683 _nvme_ctrlr_destruct(nvme_ctrlr1); 3684 3685 poll_threads(); 3686 spdk_delay_us(1000); 3687 poll_threads(); 3688 3689 nvme_ctrlr2->destruct = true; 3690 _nvme_ctrlr_destruct(nvme_ctrlr2); 3691 3692 poll_threads(); 3693 spdk_delay_us(1000); 3694 poll_threads(); 3695 3696 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3697 } 3698 3699 static void 3700 test_add_multi_io_paths_to_nbdev_ch(void) 3701 { 3702 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3703 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3704 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3705 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3706 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3707 const int STRING_SIZE = 32; 3708 const char *attached_names[STRING_SIZE]; 3709 struct nvme_bdev *bdev; 3710 struct spdk_io_channel *ch; 3711 struct nvme_bdev_channel *nbdev_ch; 3712 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3713 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3714 int rc; 3715 3716 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3717 ut_init_trid(&path1.trid); 3718 ut_init_trid2(&path2.trid); 3719 ut_init_trid3(&path3.trid); 3720 g_ut_attach_ctrlr_status = 0; 3721 g_ut_attach_bdev_count = 1; 3722 3723 set_thread(1); 3724 3725 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3726 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3727 3728 ctrlr1->ns[0].uuid = &uuid1; 3729 3730 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3731 attach_ctrlr_done, NULL, NULL, NULL, true); 3732 CU_ASSERT(rc == 0); 3733 3734 spdk_delay_us(1000); 3735 poll_threads(); 3736 3737 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3738 poll_threads(); 3739 3740 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3741 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3742 3743 ctrlr2->ns[0].uuid = &uuid1; 3744 3745 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3746 attach_ctrlr_done, NULL, NULL, NULL, true); 3747 CU_ASSERT(rc == 0); 3748 3749 spdk_delay_us(1000); 3750 poll_threads(); 3751 3752 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3753 poll_threads(); 3754 3755 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3756 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3757 3758 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3759 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3760 3761 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3762 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3763 3764 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3765 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3766 3767 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3768 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3769 3770 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3771 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3772 3773 set_thread(0); 3774 3775 ch = spdk_get_io_channel(bdev); 3776 SPDK_CU_ASSERT_FATAL(ch != NULL); 3777 nbdev_ch = spdk_io_channel_get_ctx(ch); 3778 3779 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3780 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3781 3782 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3783 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3784 3785 set_thread(1); 3786 3787 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3788 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3789 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3790 3791 ctrlr3->ns[0].uuid = &uuid1; 3792 3793 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3794 attach_ctrlr_done, NULL, NULL, NULL, true); 3795 CU_ASSERT(rc == 0); 3796 3797 spdk_delay_us(1000); 3798 poll_threads(); 3799 3800 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3801 poll_threads(); 3802 3803 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3804 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3805 3806 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3807 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3808 3809 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3810 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3811 3812 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3813 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3814 CU_ASSERT(rc == 0); 3815 3816 poll_threads(); 3817 spdk_delay_us(1000); 3818 poll_threads(); 3819 3820 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3821 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3822 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3823 3824 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3825 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3826 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3827 3828 set_thread(0); 3829 3830 spdk_put_io_channel(ch); 3831 3832 poll_threads(); 3833 3834 set_thread(1); 3835 3836 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3837 CU_ASSERT(rc == 0); 3838 3839 poll_threads(); 3840 spdk_delay_us(1000); 3841 poll_threads(); 3842 3843 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3844 } 3845 3846 static void 3847 test_admin_path(void) 3848 { 3849 struct nvme_path_id path1 = {}, path2 = {}; 3850 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3851 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3852 const int STRING_SIZE = 32; 3853 const char *attached_names[STRING_SIZE]; 3854 struct nvme_bdev *bdev; 3855 struct spdk_io_channel *ch; 3856 struct spdk_bdev_io *bdev_io; 3857 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3858 int rc; 3859 3860 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3861 ut_init_trid(&path1.trid); 3862 ut_init_trid2(&path2.trid); 3863 g_ut_attach_ctrlr_status = 0; 3864 g_ut_attach_bdev_count = 1; 3865 3866 set_thread(0); 3867 3868 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3869 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3870 3871 ctrlr1->ns[0].uuid = &uuid1; 3872 3873 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3874 attach_ctrlr_done, NULL, NULL, NULL, true); 3875 CU_ASSERT(rc == 0); 3876 3877 spdk_delay_us(1000); 3878 poll_threads(); 3879 3880 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3881 poll_threads(); 3882 3883 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3884 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3885 3886 ctrlr2->ns[0].uuid = &uuid1; 3887 3888 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3889 attach_ctrlr_done, NULL, NULL, NULL, true); 3890 CU_ASSERT(rc == 0); 3891 3892 spdk_delay_us(1000); 3893 poll_threads(); 3894 3895 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3896 poll_threads(); 3897 3898 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3899 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3900 3901 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3902 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3903 3904 ch = spdk_get_io_channel(bdev); 3905 SPDK_CU_ASSERT_FATAL(ch != NULL); 3906 3907 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3908 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3909 3910 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3911 * submitted to ctrlr2. 3912 */ 3913 ctrlr1->is_failed = true; 3914 bdev_io->internal.in_submit_request = true; 3915 3916 bdev_nvme_submit_request(ch, bdev_io); 3917 3918 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3919 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3920 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3921 3922 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3923 poll_threads(); 3924 3925 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3926 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3927 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3928 3929 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3930 ctrlr2->is_failed = true; 3931 bdev_io->internal.in_submit_request = true; 3932 3933 bdev_nvme_submit_request(ch, bdev_io); 3934 3935 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3936 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3937 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3938 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3939 3940 free(bdev_io); 3941 3942 spdk_put_io_channel(ch); 3943 3944 poll_threads(); 3945 3946 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3947 CU_ASSERT(rc == 0); 3948 3949 poll_threads(); 3950 spdk_delay_us(1000); 3951 poll_threads(); 3952 3953 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3954 } 3955 3956 static struct nvme_io_path * 3957 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3958 struct nvme_ctrlr *nvme_ctrlr) 3959 { 3960 struct nvme_io_path *io_path; 3961 3962 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3963 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3964 return io_path; 3965 } 3966 } 3967 3968 return NULL; 3969 } 3970 3971 static void 3972 test_reset_bdev_ctrlr(void) 3973 { 3974 struct nvme_path_id path1 = {}, path2 = {}; 3975 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3976 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3977 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3978 struct nvme_path_id *curr_path1, *curr_path2; 3979 const int STRING_SIZE = 32; 3980 const char *attached_names[STRING_SIZE]; 3981 struct nvme_bdev *bdev; 3982 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 3983 struct nvme_bdev_io *first_bio; 3984 struct spdk_io_channel *ch1, *ch2; 3985 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3986 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 3987 int rc; 3988 3989 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3990 ut_init_trid(&path1.trid); 3991 ut_init_trid2(&path2.trid); 3992 g_ut_attach_ctrlr_status = 0; 3993 g_ut_attach_bdev_count = 1; 3994 3995 set_thread(0); 3996 3997 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3998 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3999 4000 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4001 attach_ctrlr_done, NULL, NULL, NULL, true); 4002 CU_ASSERT(rc == 0); 4003 4004 spdk_delay_us(1000); 4005 poll_threads(); 4006 4007 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4008 poll_threads(); 4009 4010 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4011 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4012 4013 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4014 attach_ctrlr_done, NULL, NULL, NULL, true); 4015 CU_ASSERT(rc == 0); 4016 4017 spdk_delay_us(1000); 4018 poll_threads(); 4019 4020 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4021 poll_threads(); 4022 4023 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4024 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4025 4026 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4027 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4028 4029 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4030 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4031 4032 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4033 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4034 4035 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4036 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4037 4038 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4039 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4040 4041 set_thread(0); 4042 4043 ch1 = spdk_get_io_channel(bdev); 4044 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4045 4046 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4047 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4048 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4049 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4050 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4051 4052 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4053 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4054 4055 set_thread(1); 4056 4057 ch2 = spdk_get_io_channel(bdev); 4058 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4059 4060 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4061 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4062 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4063 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4064 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4065 4066 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4067 4068 /* The first reset request from bdev_io is submitted on thread 0. 4069 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4070 * 4071 * A few extra polls are necessary after resetting ctrlr1 to check 4072 * pending reset requests for ctrlr1. 4073 */ 4074 ctrlr1->is_failed = true; 4075 curr_path1->last_failed_tsc = spdk_get_ticks(); 4076 ctrlr2->is_failed = true; 4077 curr_path2->last_failed_tsc = spdk_get_ticks(); 4078 4079 set_thread(0); 4080 4081 bdev_nvme_submit_request(ch1, first_bdev_io); 4082 CU_ASSERT(first_bio->io_path == io_path11); 4083 CU_ASSERT(nvme_ctrlr1->resetting == true); 4084 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4085 4086 poll_thread_times(0, 3); 4087 CU_ASSERT(io_path11->qpair->qpair == NULL); 4088 CU_ASSERT(io_path21->qpair->qpair != NULL); 4089 4090 poll_thread_times(1, 2); 4091 CU_ASSERT(io_path11->qpair->qpair == NULL); 4092 CU_ASSERT(io_path21->qpair->qpair == NULL); 4093 CU_ASSERT(ctrlr1->is_failed == true); 4094 4095 poll_thread_times(0, 1); 4096 CU_ASSERT(nvme_ctrlr1->resetting == true); 4097 CU_ASSERT(ctrlr1->is_failed == false); 4098 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4099 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4100 4101 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4102 poll_thread_times(0, 2); 4103 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4104 4105 poll_thread_times(0, 1); 4106 CU_ASSERT(io_path11->qpair->qpair != NULL); 4107 CU_ASSERT(io_path21->qpair->qpair == NULL); 4108 4109 poll_thread_times(1, 1); 4110 CU_ASSERT(io_path11->qpair->qpair != NULL); 4111 CU_ASSERT(io_path21->qpair->qpair != NULL); 4112 4113 poll_thread_times(0, 2); 4114 CU_ASSERT(nvme_ctrlr1->resetting == true); 4115 poll_thread_times(1, 1); 4116 CU_ASSERT(nvme_ctrlr1->resetting == true); 4117 poll_thread_times(0, 2); 4118 CU_ASSERT(nvme_ctrlr1->resetting == false); 4119 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4120 CU_ASSERT(first_bio->io_path == io_path12); 4121 CU_ASSERT(nvme_ctrlr2->resetting == true); 4122 4123 poll_thread_times(0, 3); 4124 CU_ASSERT(io_path12->qpair->qpair == NULL); 4125 CU_ASSERT(io_path22->qpair->qpair != NULL); 4126 4127 poll_thread_times(1, 2); 4128 CU_ASSERT(io_path12->qpair->qpair == NULL); 4129 CU_ASSERT(io_path22->qpair->qpair == NULL); 4130 CU_ASSERT(ctrlr2->is_failed == true); 4131 4132 poll_thread_times(0, 1); 4133 CU_ASSERT(nvme_ctrlr2->resetting == true); 4134 CU_ASSERT(ctrlr2->is_failed == false); 4135 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4136 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4137 4138 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4139 poll_thread_times(0, 2); 4140 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4141 4142 poll_thread_times(0, 1); 4143 CU_ASSERT(io_path12->qpair->qpair != NULL); 4144 CU_ASSERT(io_path22->qpair->qpair == NULL); 4145 4146 poll_thread_times(1, 2); 4147 CU_ASSERT(io_path12->qpair->qpair != NULL); 4148 CU_ASSERT(io_path22->qpair->qpair != NULL); 4149 4150 poll_thread_times(0, 2); 4151 CU_ASSERT(nvme_ctrlr2->resetting == true); 4152 poll_thread_times(1, 1); 4153 CU_ASSERT(nvme_ctrlr2->resetting == true); 4154 poll_thread_times(0, 2); 4155 CU_ASSERT(first_bio->io_path == NULL); 4156 CU_ASSERT(nvme_ctrlr2->resetting == false); 4157 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4158 4159 poll_threads(); 4160 4161 /* There is a race between two reset requests from bdev_io. 4162 * 4163 * The first reset request is submitted on thread 0, and the second reset 4164 * request is submitted on thread 1 while the first is resetting ctrlr1. 4165 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4166 * both reset requests go to ctrlr2. The first comes earlier than the second. 4167 * The second is pending on ctrlr2 again. After the first completes resetting 4168 * ctrl2, both complete successfully. 4169 */ 4170 ctrlr1->is_failed = true; 4171 curr_path1->last_failed_tsc = spdk_get_ticks(); 4172 ctrlr2->is_failed = true; 4173 curr_path2->last_failed_tsc = spdk_get_ticks(); 4174 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4175 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4176 4177 set_thread(0); 4178 4179 bdev_nvme_submit_request(ch1, first_bdev_io); 4180 4181 set_thread(1); 4182 4183 bdev_nvme_submit_request(ch2, second_bdev_io); 4184 4185 CU_ASSERT(nvme_ctrlr1->resetting == true); 4186 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4187 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io); 4188 4189 poll_threads(); 4190 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4191 poll_threads(); 4192 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4193 poll_threads(); 4194 4195 CU_ASSERT(ctrlr1->is_failed == false); 4196 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4197 CU_ASSERT(ctrlr2->is_failed == false); 4198 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4199 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4200 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4201 4202 set_thread(0); 4203 4204 spdk_put_io_channel(ch1); 4205 4206 set_thread(1); 4207 4208 spdk_put_io_channel(ch2); 4209 4210 poll_threads(); 4211 4212 set_thread(0); 4213 4214 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4215 CU_ASSERT(rc == 0); 4216 4217 poll_threads(); 4218 spdk_delay_us(1000); 4219 poll_threads(); 4220 4221 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4222 4223 free(first_bdev_io); 4224 free(second_bdev_io); 4225 } 4226 4227 static void 4228 test_find_io_path(void) 4229 { 4230 struct nvme_bdev_channel nbdev_ch = { 4231 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4232 }; 4233 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4234 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4235 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4236 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4237 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4238 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4239 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 4240 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4241 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4242 4243 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4244 4245 /* Test if io_path whose ANA state is not accessible is excluded. */ 4246 4247 nvme_qpair1.qpair = &qpair1; 4248 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4249 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4250 4251 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4252 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4253 4254 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4255 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4256 4257 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4258 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4259 4260 nbdev_ch.current_io_path = NULL; 4261 4262 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4263 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4264 4265 nbdev_ch.current_io_path = NULL; 4266 4267 /* Test if io_path whose qpair is resetting is excluded. */ 4268 4269 nvme_qpair1.qpair = NULL; 4270 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4271 4272 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4273 4274 /* Test if ANA optimized state or the first found ANA non-optimized state 4275 * is prioritized. 4276 */ 4277 4278 nvme_qpair1.qpair = &qpair1; 4279 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4280 nvme_qpair2.qpair = &qpair2; 4281 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4282 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4283 4284 nbdev_ch.current_io_path = NULL; 4285 4286 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4287 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4288 4289 nbdev_ch.current_io_path = NULL; 4290 } 4291 4292 static void 4293 test_retry_io_if_ana_state_is_updating(void) 4294 { 4295 struct nvme_path_id path = {}; 4296 struct nvme_ctrlr_opts opts = {}; 4297 struct spdk_nvme_ctrlr *ctrlr; 4298 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4299 struct nvme_ctrlr *nvme_ctrlr; 4300 const int STRING_SIZE = 32; 4301 const char *attached_names[STRING_SIZE]; 4302 struct nvme_bdev *bdev; 4303 struct nvme_ns *nvme_ns; 4304 struct spdk_bdev_io *bdev_io1; 4305 struct spdk_io_channel *ch; 4306 struct nvme_bdev_channel *nbdev_ch; 4307 struct nvme_io_path *io_path; 4308 struct nvme_qpair *nvme_qpair; 4309 int rc; 4310 4311 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4312 ut_init_trid(&path.trid); 4313 4314 set_thread(0); 4315 4316 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4317 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4318 4319 g_ut_attach_ctrlr_status = 0; 4320 g_ut_attach_bdev_count = 1; 4321 4322 opts.ctrlr_loss_timeout_sec = -1; 4323 opts.reconnect_delay_sec = 1; 4324 4325 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4326 attach_ctrlr_done, NULL, NULL, &opts, false); 4327 CU_ASSERT(rc == 0); 4328 4329 spdk_delay_us(1000); 4330 poll_threads(); 4331 4332 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4333 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4334 4335 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4336 CU_ASSERT(nvme_ctrlr != NULL); 4337 4338 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4339 CU_ASSERT(bdev != NULL); 4340 4341 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4342 CU_ASSERT(nvme_ns != NULL); 4343 4344 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4345 ut_bdev_io_set_buf(bdev_io1); 4346 4347 ch = spdk_get_io_channel(bdev); 4348 SPDK_CU_ASSERT_FATAL(ch != NULL); 4349 4350 nbdev_ch = spdk_io_channel_get_ctx(ch); 4351 4352 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4353 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4354 4355 nvme_qpair = io_path->qpair; 4356 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4357 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4358 4359 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4360 4361 /* If qpair is connected, I/O should succeed. */ 4362 bdev_io1->internal.in_submit_request = true; 4363 4364 bdev_nvme_submit_request(ch, bdev_io1); 4365 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4366 4367 poll_threads(); 4368 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4369 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4370 4371 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4372 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4373 nbdev_ch->current_io_path = NULL; 4374 4375 bdev_io1->internal.in_submit_request = true; 4376 4377 bdev_nvme_submit_request(ch, bdev_io1); 4378 4379 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4380 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4381 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4382 4383 /* ANA state became accessible while I/O was queued. */ 4384 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4385 4386 spdk_delay_us(1000000); 4387 4388 poll_thread_times(0, 1); 4389 4390 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4391 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4392 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4393 4394 poll_threads(); 4395 4396 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4397 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4398 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4399 4400 free(bdev_io1); 4401 4402 spdk_put_io_channel(ch); 4403 4404 poll_threads(); 4405 4406 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4407 CU_ASSERT(rc == 0); 4408 4409 poll_threads(); 4410 spdk_delay_us(1000); 4411 poll_threads(); 4412 4413 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4414 } 4415 4416 static void 4417 test_retry_io_for_io_path_error(void) 4418 { 4419 struct nvme_path_id path1 = {}, path2 = {}; 4420 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4421 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4422 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4423 const int STRING_SIZE = 32; 4424 const char *attached_names[STRING_SIZE]; 4425 struct nvme_bdev *bdev; 4426 struct nvme_ns *nvme_ns1, *nvme_ns2; 4427 struct spdk_bdev_io *bdev_io; 4428 struct nvme_bdev_io *bio; 4429 struct spdk_io_channel *ch; 4430 struct nvme_bdev_channel *nbdev_ch; 4431 struct nvme_io_path *io_path1, *io_path2; 4432 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4433 struct ut_nvme_req *req; 4434 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4435 int rc; 4436 4437 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4438 ut_init_trid(&path1.trid); 4439 ut_init_trid2(&path2.trid); 4440 4441 g_opts.bdev_retry_count = 1; 4442 4443 set_thread(0); 4444 4445 g_ut_attach_ctrlr_status = 0; 4446 g_ut_attach_bdev_count = 1; 4447 4448 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4449 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4450 4451 ctrlr1->ns[0].uuid = &uuid1; 4452 4453 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4454 attach_ctrlr_done, NULL, NULL, NULL, true); 4455 CU_ASSERT(rc == 0); 4456 4457 spdk_delay_us(1000); 4458 poll_threads(); 4459 4460 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4461 poll_threads(); 4462 4463 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4464 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4465 4466 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4467 CU_ASSERT(nvme_ctrlr1 != NULL); 4468 4469 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4470 CU_ASSERT(bdev != NULL); 4471 4472 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4473 CU_ASSERT(nvme_ns1 != NULL); 4474 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4475 4476 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4477 ut_bdev_io_set_buf(bdev_io); 4478 4479 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4480 4481 ch = spdk_get_io_channel(bdev); 4482 SPDK_CU_ASSERT_FATAL(ch != NULL); 4483 4484 nbdev_ch = spdk_io_channel_get_ctx(ch); 4485 4486 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4487 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4488 4489 nvme_qpair1 = io_path1->qpair; 4490 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4491 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4492 4493 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4494 4495 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4496 bdev_io->internal.in_submit_request = true; 4497 4498 bdev_nvme_submit_request(ch, bdev_io); 4499 4500 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4501 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4502 4503 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4504 SPDK_CU_ASSERT_FATAL(req != NULL); 4505 4506 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4507 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4508 req->cpl.status.dnr = 1; 4509 4510 poll_thread_times(0, 1); 4511 4512 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4513 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4514 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4515 4516 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4517 bdev_io->internal.in_submit_request = true; 4518 4519 bdev_nvme_submit_request(ch, bdev_io); 4520 4521 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4522 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4523 4524 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4525 SPDK_CU_ASSERT_FATAL(req != NULL); 4526 4527 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4528 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4529 4530 poll_thread_times(0, 1); 4531 4532 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4533 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4534 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4535 4536 poll_threads(); 4537 4538 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4539 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4540 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4541 4542 /* Add io_path2 dynamically, and create a multipath configuration. */ 4543 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4544 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4545 4546 ctrlr2->ns[0].uuid = &uuid1; 4547 4548 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4549 attach_ctrlr_done, NULL, NULL, NULL, true); 4550 CU_ASSERT(rc == 0); 4551 4552 spdk_delay_us(1000); 4553 poll_threads(); 4554 4555 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4556 poll_threads(); 4557 4558 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4559 CU_ASSERT(nvme_ctrlr2 != NULL); 4560 4561 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4562 CU_ASSERT(nvme_ns2 != NULL); 4563 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4564 4565 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4566 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4567 4568 nvme_qpair2 = io_path2->qpair; 4569 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4570 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4571 4572 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4573 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4574 * So after a retry, I/O is submitted to io_path2 and should succeed. 4575 */ 4576 bdev_io->internal.in_submit_request = true; 4577 4578 bdev_nvme_submit_request(ch, bdev_io); 4579 4580 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4581 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4582 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4583 4584 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4585 SPDK_CU_ASSERT_FATAL(req != NULL); 4586 4587 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4588 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4589 4590 poll_thread_times(0, 1); 4591 4592 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4593 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4594 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4595 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4596 4597 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4598 nvme_qpair1->qpair = NULL; 4599 4600 poll_threads(); 4601 4602 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4603 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4604 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4605 4606 free(bdev_io); 4607 4608 spdk_put_io_channel(ch); 4609 4610 poll_threads(); 4611 4612 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4613 CU_ASSERT(rc == 0); 4614 4615 poll_threads(); 4616 spdk_delay_us(1000); 4617 poll_threads(); 4618 4619 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4620 4621 g_opts.bdev_retry_count = 0; 4622 } 4623 4624 static void 4625 test_retry_io_count(void) 4626 { 4627 struct nvme_path_id path = {}; 4628 struct spdk_nvme_ctrlr *ctrlr; 4629 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4630 struct nvme_ctrlr *nvme_ctrlr; 4631 const int STRING_SIZE = 32; 4632 const char *attached_names[STRING_SIZE]; 4633 struct nvme_bdev *bdev; 4634 struct nvme_ns *nvme_ns; 4635 struct spdk_bdev_io *bdev_io; 4636 struct nvme_bdev_io *bio; 4637 struct spdk_io_channel *ch; 4638 struct nvme_bdev_channel *nbdev_ch; 4639 struct nvme_io_path *io_path; 4640 struct nvme_qpair *nvme_qpair; 4641 struct ut_nvme_req *req; 4642 int rc; 4643 4644 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4645 ut_init_trid(&path.trid); 4646 4647 set_thread(0); 4648 4649 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4650 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4651 4652 g_ut_attach_ctrlr_status = 0; 4653 g_ut_attach_bdev_count = 1; 4654 4655 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4656 attach_ctrlr_done, NULL, NULL, NULL, false); 4657 CU_ASSERT(rc == 0); 4658 4659 spdk_delay_us(1000); 4660 poll_threads(); 4661 4662 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4663 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4664 4665 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4666 CU_ASSERT(nvme_ctrlr != NULL); 4667 4668 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4669 CU_ASSERT(bdev != NULL); 4670 4671 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4672 CU_ASSERT(nvme_ns != NULL); 4673 4674 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4675 ut_bdev_io_set_buf(bdev_io); 4676 4677 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4678 4679 ch = spdk_get_io_channel(bdev); 4680 SPDK_CU_ASSERT_FATAL(ch != NULL); 4681 4682 nbdev_ch = spdk_io_channel_get_ctx(ch); 4683 4684 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4685 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4686 4687 nvme_qpair = io_path->qpair; 4688 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4689 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4690 4691 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4692 4693 /* If I/O is aborted by request, it should not be retried. */ 4694 g_opts.bdev_retry_count = 1; 4695 4696 bdev_io->internal.in_submit_request = true; 4697 4698 bdev_nvme_submit_request(ch, bdev_io); 4699 4700 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4701 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4702 4703 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4704 SPDK_CU_ASSERT_FATAL(req != NULL); 4705 4706 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4707 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4708 4709 poll_thread_times(0, 1); 4710 4711 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4712 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4713 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4714 4715 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4716 * the failed I/O should not be retried. 4717 */ 4718 g_opts.bdev_retry_count = 4; 4719 4720 bdev_io->internal.in_submit_request = true; 4721 4722 bdev_nvme_submit_request(ch, bdev_io); 4723 4724 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4725 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4726 4727 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4728 SPDK_CU_ASSERT_FATAL(req != NULL); 4729 4730 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4731 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4732 bio->retry_count = 4; 4733 4734 poll_thread_times(0, 1); 4735 4736 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4737 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4738 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4739 4740 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4741 g_opts.bdev_retry_count = -1; 4742 4743 bdev_io->internal.in_submit_request = true; 4744 4745 bdev_nvme_submit_request(ch, bdev_io); 4746 4747 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4748 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4749 4750 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4751 SPDK_CU_ASSERT_FATAL(req != NULL); 4752 4753 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4754 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4755 bio->retry_count = 4; 4756 4757 poll_thread_times(0, 1); 4758 4759 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4760 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4761 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4762 4763 poll_threads(); 4764 4765 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4766 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4767 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4768 4769 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4770 * the failed I/O should be retried. 4771 */ 4772 g_opts.bdev_retry_count = 4; 4773 4774 bdev_io->internal.in_submit_request = true; 4775 4776 bdev_nvme_submit_request(ch, bdev_io); 4777 4778 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4779 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4780 4781 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4782 SPDK_CU_ASSERT_FATAL(req != NULL); 4783 4784 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4785 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4786 bio->retry_count = 3; 4787 4788 poll_thread_times(0, 1); 4789 4790 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4791 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4792 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4793 4794 poll_threads(); 4795 4796 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4797 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4798 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4799 4800 free(bdev_io); 4801 4802 spdk_put_io_channel(ch); 4803 4804 poll_threads(); 4805 4806 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4807 CU_ASSERT(rc == 0); 4808 4809 poll_threads(); 4810 spdk_delay_us(1000); 4811 poll_threads(); 4812 4813 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4814 4815 g_opts.bdev_retry_count = 0; 4816 } 4817 4818 static void 4819 test_concurrent_read_ana_log_page(void) 4820 { 4821 struct spdk_nvme_transport_id trid = {}; 4822 struct spdk_nvme_ctrlr *ctrlr; 4823 struct nvme_ctrlr *nvme_ctrlr; 4824 const int STRING_SIZE = 32; 4825 const char *attached_names[STRING_SIZE]; 4826 int rc; 4827 4828 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4829 ut_init_trid(&trid); 4830 4831 set_thread(0); 4832 4833 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4834 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4835 4836 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4837 4838 g_ut_attach_ctrlr_status = 0; 4839 g_ut_attach_bdev_count = 1; 4840 4841 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4842 attach_ctrlr_done, NULL, NULL, NULL, false); 4843 CU_ASSERT(rc == 0); 4844 4845 spdk_delay_us(1000); 4846 poll_threads(); 4847 4848 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4849 poll_threads(); 4850 4851 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4852 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4853 4854 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4855 4856 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4857 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4858 4859 /* Following read request should be rejected. */ 4860 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4861 4862 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4863 4864 set_thread(1); 4865 4866 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4867 4868 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4869 4870 /* Reset request while reading ANA log page should not be rejected. */ 4871 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4872 CU_ASSERT(rc == 0); 4873 4874 poll_threads(); 4875 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4876 poll_threads(); 4877 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4878 poll_threads(); 4879 4880 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4881 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4882 4883 /* Read ANA log page while resetting ctrlr should be rejected. */ 4884 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4885 CU_ASSERT(rc == 0); 4886 4887 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4888 4889 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4890 4891 poll_threads(); 4892 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4893 poll_threads(); 4894 4895 set_thread(0); 4896 4897 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4898 CU_ASSERT(rc == 0); 4899 4900 poll_threads(); 4901 spdk_delay_us(1000); 4902 poll_threads(); 4903 4904 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4905 } 4906 4907 static void 4908 test_retry_io_for_ana_error(void) 4909 { 4910 struct nvme_path_id path = {}; 4911 struct spdk_nvme_ctrlr *ctrlr; 4912 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4913 struct nvme_ctrlr *nvme_ctrlr; 4914 const int STRING_SIZE = 32; 4915 const char *attached_names[STRING_SIZE]; 4916 struct nvme_bdev *bdev; 4917 struct nvme_ns *nvme_ns; 4918 struct spdk_bdev_io *bdev_io; 4919 struct nvme_bdev_io *bio; 4920 struct spdk_io_channel *ch; 4921 struct nvme_bdev_channel *nbdev_ch; 4922 struct nvme_io_path *io_path; 4923 struct nvme_qpair *nvme_qpair; 4924 struct ut_nvme_req *req; 4925 uint64_t now; 4926 int rc; 4927 4928 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4929 ut_init_trid(&path.trid); 4930 4931 g_opts.bdev_retry_count = 1; 4932 4933 set_thread(0); 4934 4935 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4936 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4937 4938 g_ut_attach_ctrlr_status = 0; 4939 g_ut_attach_bdev_count = 1; 4940 4941 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4942 attach_ctrlr_done, NULL, NULL, NULL, false); 4943 CU_ASSERT(rc == 0); 4944 4945 spdk_delay_us(1000); 4946 poll_threads(); 4947 4948 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4949 poll_threads(); 4950 4951 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4952 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4953 4954 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4955 CU_ASSERT(nvme_ctrlr != NULL); 4956 4957 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4958 CU_ASSERT(bdev != NULL); 4959 4960 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4961 CU_ASSERT(nvme_ns != NULL); 4962 4963 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4964 ut_bdev_io_set_buf(bdev_io); 4965 4966 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4967 4968 ch = spdk_get_io_channel(bdev); 4969 SPDK_CU_ASSERT_FATAL(ch != NULL); 4970 4971 nbdev_ch = spdk_io_channel_get_ctx(ch); 4972 4973 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4974 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4975 4976 nvme_qpair = io_path->qpair; 4977 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4978 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4979 4980 now = spdk_get_ticks(); 4981 4982 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4983 4984 /* If I/O got ANA error, it should be queued, the corresponding namespace 4985 * should be freezed and its ANA state should be updated. 4986 */ 4987 bdev_io->internal.in_submit_request = true; 4988 4989 bdev_nvme_submit_request(ch, bdev_io); 4990 4991 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4992 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4993 4994 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4995 SPDK_CU_ASSERT_FATAL(req != NULL); 4996 4997 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4998 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 4999 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5000 5001 poll_thread_times(0, 1); 5002 5003 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5004 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5005 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5006 /* I/O should be retried immediately. */ 5007 CU_ASSERT(bio->retry_ticks == now); 5008 CU_ASSERT(nvme_ns->ana_state_updating == true); 5009 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5010 5011 poll_threads(); 5012 5013 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5014 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5015 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5016 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5017 /* I/O should be retried after a second if no I/O path was found but 5018 * any I/O path may become available. 5019 */ 5020 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5021 5022 /* Namespace should be unfreezed after completing to update its ANA state. */ 5023 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5024 poll_threads(); 5025 5026 CU_ASSERT(nvme_ns->ana_state_updating == false); 5027 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5028 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5029 5030 /* Retry the queued I/O should succeed. */ 5031 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5032 poll_threads(); 5033 5034 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5035 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5036 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5037 5038 free(bdev_io); 5039 5040 spdk_put_io_channel(ch); 5041 5042 poll_threads(); 5043 5044 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5045 CU_ASSERT(rc == 0); 5046 5047 poll_threads(); 5048 spdk_delay_us(1000); 5049 poll_threads(); 5050 5051 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5052 5053 g_opts.bdev_retry_count = 0; 5054 } 5055 5056 static void 5057 test_check_io_error_resiliency_params(void) 5058 { 5059 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5060 * 3rd parameter is fast_io_fail_timeout_sec. 5061 */ 5062 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5063 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5064 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5065 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5066 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5067 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5068 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5069 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5070 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5071 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5072 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5073 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5074 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5075 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5076 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5077 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5078 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5079 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5080 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5081 } 5082 5083 static void 5084 test_retry_io_if_ctrlr_is_resetting(void) 5085 { 5086 struct nvme_path_id path = {}; 5087 struct nvme_ctrlr_opts opts = {}; 5088 struct spdk_nvme_ctrlr *ctrlr; 5089 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5090 struct nvme_ctrlr *nvme_ctrlr; 5091 const int STRING_SIZE = 32; 5092 const char *attached_names[STRING_SIZE]; 5093 struct nvme_bdev *bdev; 5094 struct nvme_ns *nvme_ns; 5095 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5096 struct spdk_io_channel *ch; 5097 struct nvme_bdev_channel *nbdev_ch; 5098 struct nvme_io_path *io_path; 5099 struct nvme_qpair *nvme_qpair; 5100 int rc; 5101 5102 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5103 ut_init_trid(&path.trid); 5104 5105 set_thread(0); 5106 5107 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5108 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5109 5110 g_ut_attach_ctrlr_status = 0; 5111 g_ut_attach_bdev_count = 1; 5112 5113 opts.ctrlr_loss_timeout_sec = -1; 5114 opts.reconnect_delay_sec = 1; 5115 5116 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5117 attach_ctrlr_done, NULL, NULL, &opts, false); 5118 CU_ASSERT(rc == 0); 5119 5120 spdk_delay_us(1000); 5121 poll_threads(); 5122 5123 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5124 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5125 5126 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5127 CU_ASSERT(nvme_ctrlr != NULL); 5128 5129 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5130 CU_ASSERT(bdev != NULL); 5131 5132 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5133 CU_ASSERT(nvme_ns != NULL); 5134 5135 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5136 ut_bdev_io_set_buf(bdev_io1); 5137 5138 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5139 ut_bdev_io_set_buf(bdev_io2); 5140 5141 ch = spdk_get_io_channel(bdev); 5142 SPDK_CU_ASSERT_FATAL(ch != NULL); 5143 5144 nbdev_ch = spdk_io_channel_get_ctx(ch); 5145 5146 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5147 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5148 5149 nvme_qpair = io_path->qpair; 5150 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5151 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5152 5153 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5154 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5155 5156 /* If qpair is connected, I/O should succeed. */ 5157 bdev_io1->internal.in_submit_request = true; 5158 5159 bdev_nvme_submit_request(ch, bdev_io1); 5160 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5161 5162 poll_threads(); 5163 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5164 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5165 5166 /* If qpair is disconnected, it is freed and then reconnected via resetting 5167 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5168 * while resetting the nvme_ctrlr. 5169 */ 5170 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5171 ctrlr->is_failed = true; 5172 5173 poll_thread_times(0, 5); 5174 5175 CU_ASSERT(nvme_qpair->qpair == NULL); 5176 CU_ASSERT(nvme_ctrlr->resetting == true); 5177 CU_ASSERT(ctrlr->is_failed == false); 5178 5179 bdev_io1->internal.in_submit_request = true; 5180 5181 bdev_nvme_submit_request(ch, bdev_io1); 5182 5183 spdk_delay_us(1); 5184 5185 bdev_io2->internal.in_submit_request = true; 5186 5187 bdev_nvme_submit_request(ch, bdev_io2); 5188 5189 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5190 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5191 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5192 CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link)); 5193 5194 poll_threads(); 5195 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5196 poll_threads(); 5197 5198 CU_ASSERT(nvme_qpair->qpair != NULL); 5199 CU_ASSERT(nvme_ctrlr->resetting == false); 5200 5201 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5202 5203 poll_thread_times(0, 1); 5204 5205 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5206 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5207 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5208 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5209 5210 poll_threads(); 5211 5212 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5213 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5214 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5215 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5216 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5217 5218 spdk_delay_us(1); 5219 5220 poll_thread_times(0, 1); 5221 5222 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5223 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5224 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5225 5226 poll_threads(); 5227 5228 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5229 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5230 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5231 5232 free(bdev_io1); 5233 free(bdev_io2); 5234 5235 spdk_put_io_channel(ch); 5236 5237 poll_threads(); 5238 5239 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5240 CU_ASSERT(rc == 0); 5241 5242 poll_threads(); 5243 spdk_delay_us(1000); 5244 poll_threads(); 5245 5246 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5247 } 5248 5249 static void 5250 test_reconnect_ctrlr(void) 5251 { 5252 struct spdk_nvme_transport_id trid = {}; 5253 struct spdk_nvme_ctrlr ctrlr = {}; 5254 struct nvme_ctrlr *nvme_ctrlr; 5255 struct spdk_io_channel *ch1, *ch2; 5256 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5257 int rc; 5258 5259 ut_init_trid(&trid); 5260 TAILQ_INIT(&ctrlr.active_io_qpairs); 5261 5262 set_thread(0); 5263 5264 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5265 CU_ASSERT(rc == 0); 5266 5267 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5268 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5269 5270 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5271 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5272 5273 ch1 = spdk_get_io_channel(nvme_ctrlr); 5274 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5275 5276 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5277 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5278 5279 set_thread(1); 5280 5281 ch2 = spdk_get_io_channel(nvme_ctrlr); 5282 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5283 5284 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5285 5286 /* Reset starts from thread 1. */ 5287 set_thread(1); 5288 5289 /* The reset should fail and a reconnect timer should be registered. */ 5290 ctrlr.fail_reset = true; 5291 ctrlr.is_failed = true; 5292 5293 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5294 CU_ASSERT(rc == 0); 5295 CU_ASSERT(nvme_ctrlr->resetting == true); 5296 CU_ASSERT(ctrlr.is_failed == true); 5297 5298 poll_threads(); 5299 5300 CU_ASSERT(nvme_ctrlr->resetting == false); 5301 CU_ASSERT(ctrlr.is_failed == false); 5302 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5303 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5304 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5305 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5306 5307 /* A new reset starts from thread 0. */ 5308 set_thread(1); 5309 5310 /* The reset should cancel the reconnect timer and should start from reconnection. 5311 * Then, the reset should fail and a reconnect timer should be registered again. 5312 */ 5313 ctrlr.fail_reset = true; 5314 ctrlr.is_failed = true; 5315 5316 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5317 CU_ASSERT(rc == 0); 5318 CU_ASSERT(nvme_ctrlr->resetting == true); 5319 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5320 CU_ASSERT(ctrlr.is_failed == true); 5321 5322 poll_threads(); 5323 5324 CU_ASSERT(nvme_ctrlr->resetting == false); 5325 CU_ASSERT(ctrlr.is_failed == false); 5326 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5327 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5328 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5329 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5330 5331 /* Then a reconnect retry should suceeed. */ 5332 ctrlr.fail_reset = false; 5333 5334 spdk_delay_us(SPDK_SEC_TO_USEC); 5335 poll_thread_times(0, 1); 5336 5337 CU_ASSERT(nvme_ctrlr->resetting == true); 5338 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5339 5340 poll_threads(); 5341 5342 CU_ASSERT(nvme_ctrlr->resetting == false); 5343 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5344 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5345 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5346 5347 /* The reset should fail and a reconnect timer should be registered. */ 5348 ctrlr.fail_reset = true; 5349 ctrlr.is_failed = true; 5350 5351 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5352 CU_ASSERT(rc == 0); 5353 CU_ASSERT(nvme_ctrlr->resetting == true); 5354 CU_ASSERT(ctrlr.is_failed == true); 5355 5356 poll_threads(); 5357 5358 CU_ASSERT(nvme_ctrlr->resetting == false); 5359 CU_ASSERT(ctrlr.is_failed == false); 5360 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5361 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5362 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5363 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5364 5365 /* Then a reconnect retry should still fail. */ 5366 spdk_delay_us(SPDK_SEC_TO_USEC); 5367 poll_thread_times(0, 1); 5368 5369 CU_ASSERT(nvme_ctrlr->resetting == true); 5370 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5371 5372 poll_threads(); 5373 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5374 poll_threads(); 5375 5376 CU_ASSERT(nvme_ctrlr->resetting == false); 5377 CU_ASSERT(ctrlr.is_failed == false); 5378 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5379 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5380 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5381 5382 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5383 spdk_delay_us(SPDK_SEC_TO_USEC); 5384 poll_threads(); 5385 5386 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5387 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5388 CU_ASSERT(nvme_ctrlr->destruct == true); 5389 5390 spdk_put_io_channel(ch2); 5391 5392 set_thread(0); 5393 5394 spdk_put_io_channel(ch1); 5395 5396 poll_threads(); 5397 spdk_delay_us(1000); 5398 poll_threads(); 5399 5400 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5401 } 5402 5403 static struct nvme_path_id * 5404 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5405 const struct spdk_nvme_transport_id *trid) 5406 { 5407 struct nvme_path_id *p; 5408 5409 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5410 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5411 break; 5412 } 5413 } 5414 5415 return p; 5416 } 5417 5418 static void 5419 test_retry_failover_ctrlr(void) 5420 { 5421 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5422 struct spdk_nvme_ctrlr ctrlr = {}; 5423 struct nvme_ctrlr *nvme_ctrlr = NULL; 5424 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5425 struct spdk_io_channel *ch; 5426 struct nvme_ctrlr_channel *ctrlr_ch; 5427 int rc; 5428 5429 ut_init_trid(&trid1); 5430 ut_init_trid2(&trid2); 5431 ut_init_trid3(&trid3); 5432 TAILQ_INIT(&ctrlr.active_io_qpairs); 5433 5434 set_thread(0); 5435 5436 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5437 CU_ASSERT(rc == 0); 5438 5439 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5440 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5441 5442 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5443 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5444 5445 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5446 CU_ASSERT(rc == 0); 5447 5448 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5449 CU_ASSERT(rc == 0); 5450 5451 ch = spdk_get_io_channel(nvme_ctrlr); 5452 SPDK_CU_ASSERT_FATAL(ch != NULL); 5453 5454 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5455 5456 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5457 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5458 CU_ASSERT(path_id1->last_failed_tsc == 0); 5459 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5460 5461 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5462 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5463 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5464 5465 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5466 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5467 5468 /* It is expected that connecting both of trid1, trid2, and trid3 fail, 5469 * and a reconnect timer is started. */ 5470 ctrlr.fail_reset = true; 5471 ctrlr.is_failed = true; 5472 5473 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5474 CU_ASSERT(rc == 0); 5475 5476 poll_threads(); 5477 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5478 poll_threads(); 5479 5480 CU_ASSERT(nvme_ctrlr->resetting == false); 5481 CU_ASSERT(ctrlr.is_failed == false); 5482 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5483 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5484 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5485 CU_ASSERT(path_id1->last_failed_tsc != 0); 5486 5487 CU_ASSERT(path_id2->last_failed_tsc != 0); 5488 CU_ASSERT(path_id3->last_failed_tsc != 0); 5489 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5490 5491 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5492 * switched to trid2 but reset is not started. 5493 */ 5494 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, true); 5495 CU_ASSERT(rc == 0); 5496 5497 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5498 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5499 5500 CU_ASSERT(nvme_ctrlr->resetting == false); 5501 5502 /* If reconnect succeeds, trid2 should be the active path_id */ 5503 ctrlr.fail_reset = false; 5504 5505 spdk_delay_us(SPDK_SEC_TO_USEC); 5506 poll_thread_times(0, 1); 5507 5508 CU_ASSERT(nvme_ctrlr->resetting == true); 5509 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5510 5511 poll_threads(); 5512 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5513 poll_threads(); 5514 5515 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5516 CU_ASSERT(path_id2->last_failed_tsc == 0); 5517 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5518 CU_ASSERT(nvme_ctrlr->resetting == false); 5519 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5520 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5521 5522 spdk_put_io_channel(ch); 5523 5524 poll_threads(); 5525 5526 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5527 CU_ASSERT(rc == 0); 5528 5529 poll_threads(); 5530 spdk_delay_us(1000); 5531 poll_threads(); 5532 5533 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5534 } 5535 5536 static void 5537 test_fail_path(void) 5538 { 5539 struct nvme_path_id path = {}; 5540 struct nvme_ctrlr_opts opts = {}; 5541 struct spdk_nvme_ctrlr *ctrlr; 5542 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5543 struct nvme_ctrlr *nvme_ctrlr; 5544 const int STRING_SIZE = 32; 5545 const char *attached_names[STRING_SIZE]; 5546 struct nvme_bdev *bdev; 5547 struct nvme_ns *nvme_ns; 5548 struct spdk_bdev_io *bdev_io; 5549 struct spdk_io_channel *ch; 5550 struct nvme_bdev_channel *nbdev_ch; 5551 struct nvme_io_path *io_path; 5552 struct nvme_ctrlr_channel *ctrlr_ch; 5553 int rc; 5554 5555 /* The test scenario is the following. 5556 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5557 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5558 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5559 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5560 * comes first. The queued I/O is failed. 5561 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5562 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5563 */ 5564 5565 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5566 ut_init_trid(&path.trid); 5567 5568 set_thread(0); 5569 5570 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5571 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5572 5573 g_ut_attach_ctrlr_status = 0; 5574 g_ut_attach_bdev_count = 1; 5575 5576 opts.ctrlr_loss_timeout_sec = 4; 5577 opts.reconnect_delay_sec = 1; 5578 opts.fast_io_fail_timeout_sec = 2; 5579 5580 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5581 attach_ctrlr_done, NULL, NULL, &opts, false); 5582 CU_ASSERT(rc == 0); 5583 5584 spdk_delay_us(1000); 5585 poll_threads(); 5586 5587 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5588 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5589 5590 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5591 CU_ASSERT(nvme_ctrlr != NULL); 5592 5593 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5594 CU_ASSERT(bdev != NULL); 5595 5596 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5597 CU_ASSERT(nvme_ns != NULL); 5598 5599 ch = spdk_get_io_channel(bdev); 5600 SPDK_CU_ASSERT_FATAL(ch != NULL); 5601 5602 nbdev_ch = spdk_io_channel_get_ctx(ch); 5603 5604 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5605 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5606 5607 ctrlr_ch = io_path->qpair->ctrlr_ch; 5608 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5609 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5610 5611 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5612 ut_bdev_io_set_buf(bdev_io); 5613 5614 5615 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5616 ctrlr->fail_reset = true; 5617 ctrlr->is_failed = true; 5618 5619 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5620 CU_ASSERT(rc == 0); 5621 CU_ASSERT(nvme_ctrlr->resetting == true); 5622 CU_ASSERT(ctrlr->is_failed == true); 5623 5624 poll_threads(); 5625 5626 CU_ASSERT(nvme_ctrlr->resetting == false); 5627 CU_ASSERT(ctrlr->is_failed == false); 5628 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5629 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5630 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5631 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5632 5633 /* I/O should be queued. */ 5634 bdev_io->internal.in_submit_request = true; 5635 5636 bdev_nvme_submit_request(ch, bdev_io); 5637 5638 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5639 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5640 5641 /* After a second, the I/O should be still queued and the ctrlr should be 5642 * still recovering. 5643 */ 5644 spdk_delay_us(SPDK_SEC_TO_USEC); 5645 poll_threads(); 5646 5647 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5648 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5649 5650 CU_ASSERT(nvme_ctrlr->resetting == false); 5651 CU_ASSERT(ctrlr->is_failed == false); 5652 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5653 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5654 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5655 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5656 5657 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5658 5659 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5660 spdk_delay_us(SPDK_SEC_TO_USEC); 5661 poll_threads(); 5662 5663 CU_ASSERT(nvme_ctrlr->resetting == false); 5664 CU_ASSERT(ctrlr->is_failed == false); 5665 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5666 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5667 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5668 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5669 5670 /* Then within a second, pending I/O should be failed. */ 5671 spdk_delay_us(SPDK_SEC_TO_USEC); 5672 poll_threads(); 5673 5674 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5675 poll_threads(); 5676 5677 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5678 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5679 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5680 5681 /* Another I/O submission should be failed immediately. */ 5682 bdev_io->internal.in_submit_request = true; 5683 5684 bdev_nvme_submit_request(ch, bdev_io); 5685 5686 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5687 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5688 5689 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5690 * be deleted. 5691 */ 5692 spdk_delay_us(SPDK_SEC_TO_USEC); 5693 poll_threads(); 5694 5695 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5696 poll_threads(); 5697 5698 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5699 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5700 CU_ASSERT(nvme_ctrlr->destruct == true); 5701 5702 spdk_put_io_channel(ch); 5703 5704 poll_threads(); 5705 spdk_delay_us(1000); 5706 poll_threads(); 5707 5708 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5709 5710 free(bdev_io); 5711 } 5712 5713 static void 5714 test_nvme_ns_cmp(void) 5715 { 5716 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5717 5718 nvme_ns1.id = 0; 5719 nvme_ns2.id = UINT32_MAX; 5720 5721 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5722 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5723 } 5724 5725 static void 5726 test_ana_transition(void) 5727 { 5728 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5729 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5730 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5731 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5732 5733 /* case 1: ANA transition timedout is canceled. */ 5734 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5735 nvme_ns.ana_transition_timedout = true; 5736 5737 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5738 5739 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5740 5741 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5742 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5743 5744 /* case 2: ANATT timer is kept. */ 5745 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5746 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5747 &nvme_ns, 5748 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5749 5750 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5751 5752 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5753 5754 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5755 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5756 5757 /* case 3: ANATT timer is stopped. */ 5758 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5759 5760 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5761 5762 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5763 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5764 5765 /* ANATT timer is started. */ 5766 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5767 5768 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5769 5770 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5771 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5772 5773 /* ANATT timer is expired. */ 5774 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5775 5776 poll_threads(); 5777 5778 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5779 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5780 } 5781 5782 static void 5783 _set_preferred_path_cb(void *cb_arg, int rc) 5784 { 5785 bool *done = cb_arg; 5786 5787 *done = true; 5788 } 5789 5790 static void 5791 test_set_preferred_path(void) 5792 { 5793 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5794 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5795 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5796 const int STRING_SIZE = 32; 5797 const char *attached_names[STRING_SIZE]; 5798 struct nvme_bdev *bdev; 5799 struct spdk_io_channel *ch; 5800 struct nvme_bdev_channel *nbdev_ch; 5801 struct nvme_io_path *io_path; 5802 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5803 const struct spdk_nvme_ctrlr_data *cdata; 5804 bool done; 5805 int rc; 5806 5807 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5808 ut_init_trid(&path1.trid); 5809 ut_init_trid2(&path2.trid); 5810 ut_init_trid3(&path3.trid); 5811 g_ut_attach_ctrlr_status = 0; 5812 g_ut_attach_bdev_count = 1; 5813 5814 set_thread(0); 5815 5816 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5817 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5818 5819 ctrlr1->ns[0].uuid = &uuid1; 5820 5821 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5822 attach_ctrlr_done, NULL, NULL, NULL, true); 5823 CU_ASSERT(rc == 0); 5824 5825 spdk_delay_us(1000); 5826 poll_threads(); 5827 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5828 poll_threads(); 5829 5830 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5831 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5832 5833 ctrlr2->ns[0].uuid = &uuid1; 5834 5835 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5836 attach_ctrlr_done, NULL, NULL, NULL, true); 5837 CU_ASSERT(rc == 0); 5838 5839 spdk_delay_us(1000); 5840 poll_threads(); 5841 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5842 poll_threads(); 5843 5844 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5845 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5846 5847 ctrlr3->ns[0].uuid = &uuid1; 5848 5849 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5850 attach_ctrlr_done, NULL, NULL, NULL, true); 5851 CU_ASSERT(rc == 0); 5852 5853 spdk_delay_us(1000); 5854 poll_threads(); 5855 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5856 poll_threads(); 5857 5858 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5859 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5860 5861 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5862 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5863 5864 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5865 5866 ch = spdk_get_io_channel(bdev); 5867 SPDK_CU_ASSERT_FATAL(ch != NULL); 5868 nbdev_ch = spdk_io_channel_get_ctx(ch); 5869 5870 io_path = bdev_nvme_find_io_path(nbdev_ch); 5871 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5872 5873 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5874 5875 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5876 * should return io_path to ctrlr2. 5877 */ 5878 5879 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5880 done = false; 5881 5882 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5883 5884 poll_threads(); 5885 CU_ASSERT(done == true); 5886 5887 io_path = bdev_nvme_find_io_path(nbdev_ch); 5888 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5889 5890 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5891 5892 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5893 * acquired, find_io_path() should return io_path to ctrlr3. 5894 */ 5895 5896 spdk_put_io_channel(ch); 5897 5898 poll_threads(); 5899 5900 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5901 done = false; 5902 5903 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5904 5905 poll_threads(); 5906 CU_ASSERT(done == true); 5907 5908 ch = spdk_get_io_channel(bdev); 5909 SPDK_CU_ASSERT_FATAL(ch != NULL); 5910 nbdev_ch = spdk_io_channel_get_ctx(ch); 5911 5912 io_path = bdev_nvme_find_io_path(nbdev_ch); 5913 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5914 5915 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 5916 5917 spdk_put_io_channel(ch); 5918 5919 poll_threads(); 5920 5921 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5922 CU_ASSERT(rc == 0); 5923 5924 poll_threads(); 5925 spdk_delay_us(1000); 5926 poll_threads(); 5927 5928 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5929 } 5930 5931 static void 5932 test_find_next_io_path(void) 5933 { 5934 struct nvme_bdev_channel nbdev_ch = { 5935 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 5936 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 5937 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 5938 }; 5939 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 5940 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 5941 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 5942 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 5943 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 5944 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 5945 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 5946 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 5947 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 5948 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 5949 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 5950 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 5951 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 5952 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 5953 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 5954 5955 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 5956 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 5957 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 5958 5959 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 5960 * is covered in test_find_io_path. 5961 */ 5962 5963 nbdev_ch.current_io_path = &io_path2; 5964 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5965 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5966 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5967 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5968 5969 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5970 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5971 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5972 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5973 5974 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5975 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5976 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5977 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5978 5979 nbdev_ch.current_io_path = &io_path3; 5980 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5981 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5982 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5983 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5984 5985 /* Test if next io_path is selected according to rr_min_io */ 5986 5987 nbdev_ch.current_io_path = NULL; 5988 nbdev_ch.rr_min_io = 2; 5989 nbdev_ch.rr_counter = 0; 5990 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5991 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5992 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5993 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5994 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5995 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5996 5997 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5998 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5999 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6000 } 6001 6002 static void 6003 test_find_io_path_min_qd(void) 6004 { 6005 struct nvme_bdev_channel nbdev_ch = { 6006 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6007 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6008 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6009 }; 6010 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6011 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6012 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6013 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6014 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6015 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6016 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6017 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6018 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6019 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6020 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6021 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 6022 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6023 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6024 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6025 6026 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6027 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6028 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6029 6030 /* Test if the minumum io_outstanding or the ANA optimized state is 6031 * prioritized when using least queue depth selector 6032 */ 6033 qpair1.num_outstanding_reqs = 2; 6034 qpair2.num_outstanding_reqs = 1; 6035 qpair3.num_outstanding_reqs = 0; 6036 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6037 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6038 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6039 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6040 6041 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6042 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6043 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6044 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6045 6046 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6047 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6048 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6049 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6050 6051 qpair2.num_outstanding_reqs = 4; 6052 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6053 } 6054 6055 static void 6056 test_disable_auto_failback(void) 6057 { 6058 struct nvme_path_id path1 = {}, path2 = {}; 6059 struct nvme_ctrlr_opts opts = {}; 6060 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6061 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6062 struct nvme_ctrlr *nvme_ctrlr1; 6063 const int STRING_SIZE = 32; 6064 const char *attached_names[STRING_SIZE]; 6065 struct nvme_bdev *bdev; 6066 struct spdk_io_channel *ch; 6067 struct nvme_bdev_channel *nbdev_ch; 6068 struct nvme_io_path *io_path; 6069 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6070 const struct spdk_nvme_ctrlr_data *cdata; 6071 bool done; 6072 int rc; 6073 6074 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6075 ut_init_trid(&path1.trid); 6076 ut_init_trid2(&path2.trid); 6077 g_ut_attach_ctrlr_status = 0; 6078 g_ut_attach_bdev_count = 1; 6079 6080 g_opts.disable_auto_failback = true; 6081 6082 opts.ctrlr_loss_timeout_sec = -1; 6083 opts.reconnect_delay_sec = 1; 6084 6085 set_thread(0); 6086 6087 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6088 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6089 6090 ctrlr1->ns[0].uuid = &uuid1; 6091 6092 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6093 attach_ctrlr_done, NULL, NULL, &opts, true); 6094 CU_ASSERT(rc == 0); 6095 6096 spdk_delay_us(1000); 6097 poll_threads(); 6098 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6099 poll_threads(); 6100 6101 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6102 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6103 6104 ctrlr2->ns[0].uuid = &uuid1; 6105 6106 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6107 attach_ctrlr_done, NULL, NULL, &opts, true); 6108 CU_ASSERT(rc == 0); 6109 6110 spdk_delay_us(1000); 6111 poll_threads(); 6112 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6113 poll_threads(); 6114 6115 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6116 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6117 6118 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6119 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6120 6121 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6122 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6123 6124 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6125 6126 ch = spdk_get_io_channel(bdev); 6127 SPDK_CU_ASSERT_FATAL(ch != NULL); 6128 nbdev_ch = spdk_io_channel_get_ctx(ch); 6129 6130 io_path = bdev_nvme_find_io_path(nbdev_ch); 6131 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6132 6133 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6134 6135 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6136 ctrlr1->fail_reset = true; 6137 ctrlr1->is_failed = true; 6138 6139 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6140 6141 poll_threads(); 6142 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6143 poll_threads(); 6144 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6145 poll_threads(); 6146 6147 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6148 6149 io_path = bdev_nvme_find_io_path(nbdev_ch); 6150 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6151 6152 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6153 6154 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6155 * Hence, io_path to ctrlr2 should still be used. 6156 */ 6157 ctrlr1->fail_reset = false; 6158 6159 spdk_delay_us(SPDK_SEC_TO_USEC); 6160 poll_threads(); 6161 6162 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6163 6164 io_path = bdev_nvme_find_io_path(nbdev_ch); 6165 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6166 6167 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6168 6169 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6170 * be used again. 6171 */ 6172 6173 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6174 done = false; 6175 6176 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6177 6178 poll_threads(); 6179 CU_ASSERT(done == true); 6180 6181 io_path = bdev_nvme_find_io_path(nbdev_ch); 6182 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6183 6184 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6185 6186 spdk_put_io_channel(ch); 6187 6188 poll_threads(); 6189 6190 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6191 CU_ASSERT(rc == 0); 6192 6193 poll_threads(); 6194 spdk_delay_us(1000); 6195 poll_threads(); 6196 6197 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6198 6199 g_opts.disable_auto_failback = false; 6200 } 6201 6202 static void 6203 ut_set_multipath_policy_done(void *cb_arg, int rc) 6204 { 6205 int *done = cb_arg; 6206 6207 SPDK_CU_ASSERT_FATAL(done != NULL); 6208 *done = rc; 6209 } 6210 6211 static void 6212 test_set_multipath_policy(void) 6213 { 6214 struct nvme_path_id path1 = {}, path2 = {}; 6215 struct nvme_ctrlr_opts opts = {}; 6216 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6217 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6218 const int STRING_SIZE = 32; 6219 const char *attached_names[STRING_SIZE]; 6220 struct nvme_bdev *bdev; 6221 struct spdk_io_channel *ch; 6222 struct nvme_bdev_channel *nbdev_ch; 6223 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6224 int done; 6225 int rc; 6226 6227 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6228 ut_init_trid(&path1.trid); 6229 ut_init_trid2(&path2.trid); 6230 g_ut_attach_ctrlr_status = 0; 6231 g_ut_attach_bdev_count = 1; 6232 6233 g_opts.disable_auto_failback = true; 6234 6235 opts.ctrlr_loss_timeout_sec = -1; 6236 opts.reconnect_delay_sec = 1; 6237 6238 set_thread(0); 6239 6240 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6241 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6242 6243 ctrlr1->ns[0].uuid = &uuid1; 6244 6245 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6246 attach_ctrlr_done, NULL, NULL, &opts, true); 6247 CU_ASSERT(rc == 0); 6248 6249 spdk_delay_us(1000); 6250 poll_threads(); 6251 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6252 poll_threads(); 6253 6254 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6255 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6256 6257 ctrlr2->ns[0].uuid = &uuid1; 6258 6259 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6260 attach_ctrlr_done, NULL, NULL, &opts, true); 6261 CU_ASSERT(rc == 0); 6262 6263 spdk_delay_us(1000); 6264 poll_threads(); 6265 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6266 poll_threads(); 6267 6268 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6269 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6270 6271 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6272 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6273 6274 /* If multipath policy is updated before getting any I/O channel, 6275 * an new I/O channel should have the update. 6276 */ 6277 done = -1; 6278 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6279 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6280 ut_set_multipath_policy_done, &done); 6281 poll_threads(); 6282 CU_ASSERT(done == 0); 6283 6284 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6285 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6286 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6287 6288 ch = spdk_get_io_channel(bdev); 6289 SPDK_CU_ASSERT_FATAL(ch != NULL); 6290 nbdev_ch = spdk_io_channel_get_ctx(ch); 6291 6292 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6293 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6294 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6295 6296 /* If multipath policy is updated while a I/O channel is active, 6297 * the update should be applied to the I/O channel immediately. 6298 */ 6299 done = -1; 6300 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6301 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6302 ut_set_multipath_policy_done, &done); 6303 poll_threads(); 6304 CU_ASSERT(done == 0); 6305 6306 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6307 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6308 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6309 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6310 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6311 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6312 6313 spdk_put_io_channel(ch); 6314 6315 poll_threads(); 6316 6317 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6318 CU_ASSERT(rc == 0); 6319 6320 poll_threads(); 6321 spdk_delay_us(1000); 6322 poll_threads(); 6323 6324 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6325 } 6326 6327 static void 6328 test_uuid_generation(void) 6329 { 6330 uint32_t nsid1 = 1, nsid2 = 2; 6331 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6332 char sn3[21] = " "; 6333 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6334 struct spdk_uuid uuid1, uuid2; 6335 6336 /* Test case 1: 6337 * Serial numbers are the same, nsids are different. 6338 * Compare two generated UUID - they should be different. */ 6339 uuid1 = nvme_generate_uuid(sn1, nsid1); 6340 uuid2 = nvme_generate_uuid(sn1, nsid2); 6341 6342 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6343 6344 /* Test case 2: 6345 * Serial numbers differ only by one character, nsids are the same. 6346 * Compare two generated UUID - they should be different. */ 6347 uuid1 = nvme_generate_uuid(sn1, nsid1); 6348 uuid2 = nvme_generate_uuid(sn2, nsid1); 6349 6350 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6351 6352 /* Test case 3: 6353 * Serial number comprises only of space characters. 6354 * Validate the generated UUID. */ 6355 uuid1 = nvme_generate_uuid(sn3, nsid1); 6356 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6357 } 6358 6359 static void 6360 test_retry_io_to_same_path(void) 6361 { 6362 struct nvme_path_id path1 = {}, path2 = {}; 6363 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6364 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6365 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6366 const int STRING_SIZE = 32; 6367 const char *attached_names[STRING_SIZE]; 6368 struct nvme_bdev *bdev; 6369 struct spdk_bdev_io *bdev_io; 6370 struct nvme_bdev_io *bio; 6371 struct spdk_io_channel *ch; 6372 struct nvme_bdev_channel *nbdev_ch; 6373 struct nvme_io_path *io_path1, *io_path2; 6374 struct ut_nvme_req *req; 6375 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6376 int done; 6377 int rc; 6378 6379 g_opts.nvme_ioq_poll_period_us = 1; 6380 6381 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6382 ut_init_trid(&path1.trid); 6383 ut_init_trid2(&path2.trid); 6384 g_ut_attach_ctrlr_status = 0; 6385 g_ut_attach_bdev_count = 1; 6386 6387 set_thread(0); 6388 6389 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6390 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6391 6392 ctrlr1->ns[0].uuid = &uuid1; 6393 6394 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6395 attach_ctrlr_done, NULL, NULL, NULL, true); 6396 CU_ASSERT(rc == 0); 6397 6398 spdk_delay_us(1000); 6399 poll_threads(); 6400 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6401 poll_threads(); 6402 6403 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6404 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6405 6406 ctrlr2->ns[0].uuid = &uuid1; 6407 6408 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6409 attach_ctrlr_done, NULL, NULL, NULL, true); 6410 CU_ASSERT(rc == 0); 6411 6412 spdk_delay_us(1000); 6413 poll_threads(); 6414 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6415 poll_threads(); 6416 6417 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6418 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6419 6420 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6421 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6422 6423 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 6424 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6425 6426 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6427 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6428 6429 done = -1; 6430 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6431 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6432 poll_threads(); 6433 CU_ASSERT(done == 0); 6434 6435 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6436 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6437 CU_ASSERT(bdev->rr_min_io == 1); 6438 6439 ch = spdk_get_io_channel(bdev); 6440 SPDK_CU_ASSERT_FATAL(ch != NULL); 6441 nbdev_ch = spdk_io_channel_get_ctx(ch); 6442 6443 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6444 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6445 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6446 6447 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6448 ut_bdev_io_set_buf(bdev_io); 6449 6450 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6451 6452 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6453 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6454 6455 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6456 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6457 6458 /* The 1st I/O should be submitted to io_path1. */ 6459 bdev_io->internal.in_submit_request = true; 6460 6461 bdev_nvme_submit_request(ch, bdev_io); 6462 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6463 CU_ASSERT(bio->io_path == io_path1); 6464 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6465 6466 spdk_delay_us(1); 6467 6468 poll_threads(); 6469 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6470 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6471 6472 /* The 2nd I/O should be submitted to io_path2 because the path selection 6473 * policy is round-robin. 6474 */ 6475 bdev_io->internal.in_submit_request = true; 6476 6477 bdev_nvme_submit_request(ch, bdev_io); 6478 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6479 CU_ASSERT(bio->io_path == io_path2); 6480 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6481 6482 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6483 SPDK_CU_ASSERT_FATAL(req != NULL); 6484 6485 /* Set retry count to non-zero. */ 6486 g_opts.bdev_retry_count = 2; 6487 6488 /* Inject an I/O error. */ 6489 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6490 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6491 6492 /* The 2nd I/O should be queued to nbdev_ch. */ 6493 spdk_delay_us(1); 6494 poll_thread_times(0, 1); 6495 6496 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6497 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6498 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6499 6500 /* The 2nd I/O should keep caching io_path2. */ 6501 CU_ASSERT(bio->io_path == io_path2); 6502 6503 /* The 2nd I/O should be submitted to io_path2 again. */ 6504 poll_thread_times(0, 1); 6505 6506 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6507 CU_ASSERT(bio->io_path == io_path2); 6508 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6509 6510 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6511 SPDK_CU_ASSERT_FATAL(req != NULL); 6512 6513 /* Inject an I/O error again. */ 6514 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6515 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6516 req->cpl.status.crd = 1; 6517 6518 ctrlr2->cdata.crdt[1] = 1; 6519 6520 /* The 2nd I/O should be queued to nbdev_ch. */ 6521 spdk_delay_us(1); 6522 poll_thread_times(0, 1); 6523 6524 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6525 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6526 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6527 6528 /* The 2nd I/O should keep caching io_path2. */ 6529 CU_ASSERT(bio->io_path == io_path2); 6530 6531 /* Detach ctrlr2 dynamically. */ 6532 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6533 CU_ASSERT(rc == 0); 6534 6535 spdk_delay_us(1000); 6536 poll_threads(); 6537 spdk_delay_us(1000); 6538 poll_threads(); 6539 spdk_delay_us(1000); 6540 poll_threads(); 6541 spdk_delay_us(1000); 6542 poll_threads(); 6543 6544 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 6545 6546 poll_threads(); 6547 spdk_delay_us(100000); 6548 poll_threads(); 6549 spdk_delay_us(1); 6550 poll_threads(); 6551 6552 /* The 2nd I/O should succeed by io_path1. */ 6553 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6554 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6555 CU_ASSERT(bio->io_path == io_path1); 6556 6557 free(bdev_io); 6558 6559 spdk_put_io_channel(ch); 6560 6561 poll_threads(); 6562 spdk_delay_us(1); 6563 poll_threads(); 6564 6565 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6566 CU_ASSERT(rc == 0); 6567 6568 poll_threads(); 6569 spdk_delay_us(1000); 6570 poll_threads(); 6571 6572 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6573 6574 g_opts.nvme_ioq_poll_period_us = 0; 6575 g_opts.bdev_retry_count = 0; 6576 } 6577 6578 /* This case is to verify a fix for a complex race condition that 6579 * failover is lost if fabric connect command gets timeout while 6580 * controller is being reset. 6581 */ 6582 static void 6583 test_race_between_reset_and_disconnected(void) 6584 { 6585 struct spdk_nvme_transport_id trid = {}; 6586 struct spdk_nvme_ctrlr ctrlr = {}; 6587 struct nvme_ctrlr *nvme_ctrlr = NULL; 6588 struct nvme_path_id *curr_trid; 6589 struct spdk_io_channel *ch1, *ch2; 6590 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6591 int rc; 6592 6593 ut_init_trid(&trid); 6594 TAILQ_INIT(&ctrlr.active_io_qpairs); 6595 6596 set_thread(0); 6597 6598 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6599 CU_ASSERT(rc == 0); 6600 6601 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6602 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6603 6604 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6605 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6606 6607 ch1 = spdk_get_io_channel(nvme_ctrlr); 6608 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6609 6610 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6611 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6612 6613 set_thread(1); 6614 6615 ch2 = spdk_get_io_channel(nvme_ctrlr); 6616 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6617 6618 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6619 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6620 6621 /* Reset starts from thread 1. */ 6622 set_thread(1); 6623 6624 nvme_ctrlr->resetting = false; 6625 curr_trid->last_failed_tsc = spdk_get_ticks(); 6626 ctrlr.is_failed = true; 6627 6628 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6629 CU_ASSERT(rc == 0); 6630 CU_ASSERT(nvme_ctrlr->resetting == true); 6631 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6632 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6633 6634 poll_thread_times(0, 3); 6635 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6636 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6637 6638 poll_thread_times(0, 1); 6639 poll_thread_times(1, 1); 6640 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6641 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6642 CU_ASSERT(ctrlr.is_failed == true); 6643 6644 poll_thread_times(1, 1); 6645 poll_thread_times(0, 1); 6646 CU_ASSERT(ctrlr.is_failed == false); 6647 CU_ASSERT(ctrlr.adminq.is_connected == false); 6648 6649 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6650 poll_thread_times(0, 2); 6651 CU_ASSERT(ctrlr.adminq.is_connected == true); 6652 6653 poll_thread_times(0, 1); 6654 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6655 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6656 6657 poll_thread_times(1, 1); 6658 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6659 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6660 CU_ASSERT(nvme_ctrlr->resetting == true); 6661 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6662 6663 poll_thread_times(0, 2); 6664 CU_ASSERT(nvme_ctrlr->resetting == true); 6665 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6666 poll_thread_times(1, 1); 6667 CU_ASSERT(nvme_ctrlr->resetting == true); 6668 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6669 6670 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6671 * 6672 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric 6673 * connect command is executed. If fabric connect command gets timeout, 6674 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6675 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6676 * 6677 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 6678 */ 6679 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr, false); 6680 CU_ASSERT(rc == -EINPROGRESS); 6681 CU_ASSERT(nvme_ctrlr->resetting == true); 6682 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6683 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6684 6685 poll_thread_times(0, 1); 6686 6687 CU_ASSERT(nvme_ctrlr->resetting == true); 6688 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6689 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6690 6691 poll_threads(); 6692 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6693 poll_threads(); 6694 6695 CU_ASSERT(nvme_ctrlr->resetting == false); 6696 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6697 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6698 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6699 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6700 6701 spdk_put_io_channel(ch2); 6702 6703 set_thread(0); 6704 6705 spdk_put_io_channel(ch1); 6706 6707 poll_threads(); 6708 6709 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6710 CU_ASSERT(rc == 0); 6711 6712 poll_threads(); 6713 spdk_delay_us(1000); 6714 poll_threads(); 6715 6716 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6717 } 6718 static void 6719 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6720 { 6721 int *_rc = (int *)cb_arg; 6722 6723 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6724 *_rc = rc; 6725 } 6726 6727 static void 6728 test_ctrlr_op_rpc(void) 6729 { 6730 struct spdk_nvme_transport_id trid = {}; 6731 struct spdk_nvme_ctrlr ctrlr = {}; 6732 struct nvme_ctrlr *nvme_ctrlr = NULL; 6733 struct nvme_path_id *curr_trid; 6734 struct spdk_io_channel *ch1, *ch2; 6735 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6736 int ctrlr_op_rc; 6737 int rc; 6738 6739 ut_init_trid(&trid); 6740 TAILQ_INIT(&ctrlr.active_io_qpairs); 6741 6742 set_thread(0); 6743 6744 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6745 CU_ASSERT(rc == 0); 6746 6747 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6748 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6749 6750 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6751 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6752 6753 ch1 = spdk_get_io_channel(nvme_ctrlr); 6754 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6755 6756 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6757 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6758 6759 set_thread(1); 6760 6761 ch2 = spdk_get_io_channel(nvme_ctrlr); 6762 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6763 6764 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6765 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6766 6767 /* Reset starts from thread 1. */ 6768 set_thread(1); 6769 6770 /* Case 1: ctrlr is already being destructed. */ 6771 nvme_ctrlr->destruct = true; 6772 ctrlr_op_rc = 0; 6773 6774 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6775 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6776 6777 poll_threads(); 6778 6779 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6780 6781 /* Case 2: reset is in progress. */ 6782 nvme_ctrlr->destruct = false; 6783 nvme_ctrlr->resetting = true; 6784 ctrlr_op_rc = 0; 6785 6786 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6787 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6788 6789 poll_threads(); 6790 6791 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6792 6793 /* Case 3: reset completes successfully. */ 6794 nvme_ctrlr->resetting = false; 6795 curr_trid->last_failed_tsc = spdk_get_ticks(); 6796 ctrlr.is_failed = true; 6797 ctrlr_op_rc = -1; 6798 6799 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6800 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6801 6802 CU_ASSERT(nvme_ctrlr->resetting == true); 6803 CU_ASSERT(ctrlr_op_rc == -1); 6804 6805 poll_threads(); 6806 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6807 poll_threads(); 6808 6809 CU_ASSERT(nvme_ctrlr->resetting == false); 6810 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6811 CU_ASSERT(ctrlr.is_failed == false); 6812 CU_ASSERT(ctrlr_op_rc == 0); 6813 6814 /* Case 4: invalid operation. */ 6815 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6816 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6817 6818 poll_threads(); 6819 6820 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6821 6822 spdk_put_io_channel(ch2); 6823 6824 set_thread(0); 6825 6826 spdk_put_io_channel(ch1); 6827 6828 poll_threads(); 6829 6830 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6831 CU_ASSERT(rc == 0); 6832 6833 poll_threads(); 6834 spdk_delay_us(1000); 6835 poll_threads(); 6836 6837 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6838 } 6839 6840 static void 6841 test_bdev_ctrlr_op_rpc(void) 6842 { 6843 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6844 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6845 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6846 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6847 struct nvme_path_id *curr_trid1, *curr_trid2; 6848 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6849 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6850 int ctrlr_op_rc; 6851 int rc; 6852 6853 ut_init_trid(&trid1); 6854 ut_init_trid2(&trid2); 6855 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6856 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6857 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6858 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6859 ctrlr1.cdata.cntlid = 1; 6860 ctrlr2.cdata.cntlid = 2; 6861 ctrlr1.adminq.is_connected = true; 6862 ctrlr2.adminq.is_connected = true; 6863 6864 set_thread(0); 6865 6866 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6867 CU_ASSERT(rc == 0); 6868 6869 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6870 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6871 6872 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1); 6873 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6874 6875 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6876 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6877 6878 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6879 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6880 6881 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6882 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6883 6884 set_thread(1); 6885 6886 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6887 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6888 6889 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6890 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6891 6892 set_thread(0); 6893 6894 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6895 CU_ASSERT(rc == 0); 6896 6897 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2); 6898 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6899 6900 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 6901 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 6902 6903 ch21 = spdk_get_io_channel(nvme_ctrlr2); 6904 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 6905 6906 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 6907 CU_ASSERT(ctrlr_ch21->qpair != NULL); 6908 6909 set_thread(1); 6910 6911 ch22 = spdk_get_io_channel(nvme_ctrlr2); 6912 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 6913 6914 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 6915 CU_ASSERT(ctrlr_ch22->qpair != NULL); 6916 6917 /* Reset starts from thread 1. */ 6918 set_thread(1); 6919 6920 nvme_ctrlr1->resetting = false; 6921 nvme_ctrlr2->resetting = false; 6922 curr_trid1->last_failed_tsc = spdk_get_ticks(); 6923 curr_trid2->last_failed_tsc = spdk_get_ticks(); 6924 ctrlr_op_rc = -1; 6925 6926 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 6927 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6928 6929 CU_ASSERT(nvme_ctrlr1->resetting == true); 6930 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6931 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6932 CU_ASSERT(nvme_ctrlr2->resetting == false); 6933 6934 poll_thread_times(0, 3); 6935 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6936 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 6937 6938 poll_thread_times(0, 1); 6939 poll_thread_times(1, 1); 6940 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6941 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 6942 6943 poll_thread_times(1, 1); 6944 poll_thread_times(0, 1); 6945 CU_ASSERT(ctrlr1.adminq.is_connected == false); 6946 6947 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6948 poll_thread_times(0, 2); 6949 CU_ASSERT(ctrlr1.adminq.is_connected == true); 6950 6951 poll_thread_times(0, 1); 6952 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 6953 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 6954 6955 poll_thread_times(1, 1); 6956 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 6957 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 6958 CU_ASSERT(nvme_ctrlr1->resetting == true); 6959 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 6960 6961 poll_thread_times(0, 2); 6962 poll_thread_times(1, 1); 6963 poll_thread_times(0, 1); 6964 poll_thread_times(1, 1); 6965 poll_thread_times(0, 1); 6966 poll_thread_times(1, 1); 6967 poll_thread_times(0, 1); 6968 6969 CU_ASSERT(nvme_ctrlr1->resetting == false); 6970 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 6971 CU_ASSERT(nvme_ctrlr2->resetting == true); 6972 6973 poll_threads(); 6974 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6975 poll_threads(); 6976 6977 CU_ASSERT(nvme_ctrlr2->resetting == false); 6978 CU_ASSERT(ctrlr_op_rc == 0); 6979 6980 set_thread(1); 6981 6982 spdk_put_io_channel(ch12); 6983 spdk_put_io_channel(ch22); 6984 6985 set_thread(0); 6986 6987 spdk_put_io_channel(ch11); 6988 spdk_put_io_channel(ch21); 6989 6990 poll_threads(); 6991 6992 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6993 CU_ASSERT(rc == 0); 6994 6995 poll_threads(); 6996 spdk_delay_us(1000); 6997 poll_threads(); 6998 6999 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7000 } 7001 7002 static void 7003 test_disable_enable_ctrlr(void) 7004 { 7005 struct spdk_nvme_transport_id trid = {}; 7006 struct spdk_nvme_ctrlr ctrlr = {}; 7007 struct nvme_ctrlr *nvme_ctrlr = NULL; 7008 struct nvme_path_id *curr_trid; 7009 struct spdk_io_channel *ch1, *ch2; 7010 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7011 int rc; 7012 7013 ut_init_trid(&trid); 7014 TAILQ_INIT(&ctrlr.active_io_qpairs); 7015 ctrlr.adminq.is_connected = true; 7016 7017 set_thread(0); 7018 7019 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7020 CU_ASSERT(rc == 0); 7021 7022 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7023 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7024 7025 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7026 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7027 7028 ch1 = spdk_get_io_channel(nvme_ctrlr); 7029 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7030 7031 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7032 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7033 7034 set_thread(1); 7035 7036 ch2 = spdk_get_io_channel(nvme_ctrlr); 7037 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7038 7039 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7040 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7041 7042 /* Disable starts from thread 1. */ 7043 set_thread(1); 7044 7045 /* Case 1: ctrlr is already disabled. */ 7046 nvme_ctrlr->disabled = true; 7047 7048 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7049 CU_ASSERT(rc == -EALREADY); 7050 7051 /* Case 2: ctrlr is already being destructed. */ 7052 nvme_ctrlr->disabled = false; 7053 nvme_ctrlr->destruct = true; 7054 7055 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7056 CU_ASSERT(rc == -ENXIO); 7057 7058 /* Case 3: reset is in progress. */ 7059 nvme_ctrlr->destruct = false; 7060 nvme_ctrlr->resetting = true; 7061 7062 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7063 CU_ASSERT(rc == -EBUSY); 7064 7065 /* Case 4: disable completes successfully. */ 7066 nvme_ctrlr->resetting = false; 7067 7068 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7069 CU_ASSERT(rc == 0); 7070 CU_ASSERT(nvme_ctrlr->resetting == true); 7071 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7072 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7073 7074 poll_thread_times(0, 3); 7075 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7076 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7077 7078 poll_thread_times(0, 1); 7079 poll_thread_times(1, 1); 7080 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7081 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7082 7083 poll_thread_times(1, 1); 7084 poll_thread_times(0, 1); 7085 CU_ASSERT(ctrlr.adminq.is_connected == false); 7086 poll_thread_times(1, 1); 7087 poll_thread_times(0, 1); 7088 poll_thread_times(1, 1); 7089 poll_thread_times(0, 1); 7090 CU_ASSERT(nvme_ctrlr->resetting == false); 7091 CU_ASSERT(nvme_ctrlr->disabled == true); 7092 7093 /* Case 5: enable completes successfully. */ 7094 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7095 CU_ASSERT(rc == 0); 7096 7097 CU_ASSERT(nvme_ctrlr->resetting == true); 7098 CU_ASSERT(nvme_ctrlr->disabled == false); 7099 7100 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7101 poll_thread_times(0, 2); 7102 CU_ASSERT(ctrlr.adminq.is_connected == true); 7103 7104 poll_thread_times(0, 1); 7105 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7106 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7107 7108 poll_thread_times(1, 1); 7109 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7110 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7111 CU_ASSERT(nvme_ctrlr->resetting == true); 7112 7113 poll_thread_times(0, 2); 7114 CU_ASSERT(nvme_ctrlr->resetting == true); 7115 poll_thread_times(1, 1); 7116 CU_ASSERT(nvme_ctrlr->resetting == true); 7117 poll_thread_times(0, 1); 7118 CU_ASSERT(nvme_ctrlr->resetting == false); 7119 7120 /* Case 6: ctrlr is already enabled. */ 7121 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7122 CU_ASSERT(rc == -EALREADY); 7123 7124 set_thread(0); 7125 7126 /* Case 7: disable cancels delayed reconnect. */ 7127 nvme_ctrlr->opts.reconnect_delay_sec = 10; 7128 ctrlr.fail_reset = true; 7129 7130 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7131 CU_ASSERT(rc == 0); 7132 7133 poll_threads(); 7134 7135 CU_ASSERT(nvme_ctrlr->resetting == false); 7136 CU_ASSERT(ctrlr.is_failed == false); 7137 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7138 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7139 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 7140 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 7141 7142 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7143 CU_ASSERT(rc == 0); 7144 7145 CU_ASSERT(nvme_ctrlr->resetting == true); 7146 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 7147 7148 poll_threads(); 7149 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7150 poll_threads(); 7151 7152 CU_ASSERT(nvme_ctrlr->resetting == false); 7153 CU_ASSERT(nvme_ctrlr->disabled == true); 7154 7155 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7156 CU_ASSERT(rc == 0); 7157 7158 CU_ASSERT(nvme_ctrlr->resetting == true); 7159 CU_ASSERT(nvme_ctrlr->disabled == false); 7160 7161 poll_threads(); 7162 7163 CU_ASSERT(nvme_ctrlr->resetting == false); 7164 7165 set_thread(1); 7166 7167 spdk_put_io_channel(ch2); 7168 7169 set_thread(0); 7170 7171 spdk_put_io_channel(ch1); 7172 7173 poll_threads(); 7174 7175 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7176 CU_ASSERT(rc == 0); 7177 7178 poll_threads(); 7179 spdk_delay_us(1000); 7180 poll_threads(); 7181 7182 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7183 } 7184 7185 static void 7186 ut_delete_done(void *ctx, int rc) 7187 { 7188 int *delete_done_rc = ctx; 7189 *delete_done_rc = rc; 7190 } 7191 7192 static void 7193 test_delete_ctrlr_done(void) 7194 { 7195 struct spdk_nvme_transport_id trid = {}; 7196 struct spdk_nvme_ctrlr ctrlr = {}; 7197 int delete_done_rc = 0xDEADBEEF; 7198 int rc; 7199 7200 ut_init_trid(&trid); 7201 7202 nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7203 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 7204 7205 rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc); 7206 CU_ASSERT(rc == 0); 7207 7208 for (int i = 0; i < 20; i++) { 7209 poll_threads(); 7210 if (delete_done_rc == 0) { 7211 break; 7212 } 7213 spdk_delay_us(1000); 7214 } 7215 7216 CU_ASSERT(delete_done_rc == 0); 7217 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7218 } 7219 7220 int 7221 main(int argc, char **argv) 7222 { 7223 CU_pSuite suite = NULL; 7224 unsigned int num_failures; 7225 7226 CU_initialize_registry(); 7227 7228 suite = CU_add_suite("nvme", NULL, NULL); 7229 7230 CU_ADD_TEST(suite, test_create_ctrlr); 7231 CU_ADD_TEST(suite, test_reset_ctrlr); 7232 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 7233 CU_ADD_TEST(suite, test_failover_ctrlr); 7234 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 7235 CU_ADD_TEST(suite, test_pending_reset); 7236 CU_ADD_TEST(suite, test_attach_ctrlr); 7237 CU_ADD_TEST(suite, test_aer_cb); 7238 CU_ADD_TEST(suite, test_submit_nvme_cmd); 7239 CU_ADD_TEST(suite, test_add_remove_trid); 7240 CU_ADD_TEST(suite, test_abort); 7241 CU_ADD_TEST(suite, test_get_io_qpair); 7242 CU_ADD_TEST(suite, test_bdev_unregister); 7243 CU_ADD_TEST(suite, test_compare_ns); 7244 CU_ADD_TEST(suite, test_init_ana_log_page); 7245 CU_ADD_TEST(suite, test_get_memory_domains); 7246 CU_ADD_TEST(suite, test_reconnect_qpair); 7247 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 7248 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 7249 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 7250 CU_ADD_TEST(suite, test_admin_path); 7251 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 7252 CU_ADD_TEST(suite, test_find_io_path); 7253 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 7254 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 7255 CU_ADD_TEST(suite, test_retry_io_count); 7256 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 7257 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 7258 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 7259 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 7260 CU_ADD_TEST(suite, test_reconnect_ctrlr); 7261 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 7262 CU_ADD_TEST(suite, test_fail_path); 7263 CU_ADD_TEST(suite, test_nvme_ns_cmp); 7264 CU_ADD_TEST(suite, test_ana_transition); 7265 CU_ADD_TEST(suite, test_set_preferred_path); 7266 CU_ADD_TEST(suite, test_find_next_io_path); 7267 CU_ADD_TEST(suite, test_find_io_path_min_qd); 7268 CU_ADD_TEST(suite, test_disable_auto_failback); 7269 CU_ADD_TEST(suite, test_set_multipath_policy); 7270 CU_ADD_TEST(suite, test_uuid_generation); 7271 CU_ADD_TEST(suite, test_retry_io_to_same_path); 7272 CU_ADD_TEST(suite, test_race_between_reset_and_disconnected); 7273 CU_ADD_TEST(suite, test_ctrlr_op_rpc); 7274 CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc); 7275 CU_ADD_TEST(suite, test_disable_enable_ctrlr); 7276 CU_ADD_TEST(suite, test_delete_ctrlr_done); 7277 7278 allocate_threads(3); 7279 set_thread(0); 7280 bdev_nvme_library_init(); 7281 init_accel(); 7282 7283 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7284 7285 set_thread(0); 7286 bdev_nvme_library_fini(); 7287 fini_accel(); 7288 free_threads(); 7289 7290 CU_cleanup_registry(); 7291 7292 return num_failures; 7293 } 7294