1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 12 #include "common/lib/ut_multithread.c" 13 14 #include "bdev/nvme/bdev_nvme.c" 15 16 #include "unit/lib/json_mock.c" 17 18 #include "bdev/nvme/bdev_mdns_client.c" 19 20 static void *g_accel_p = (void *)0xdeadbeaf; 21 22 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 23 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 24 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 25 spdk_nvme_remove_cb remove_cb), NULL); 26 27 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 28 enum spdk_nvme_transport_type trtype)); 29 30 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 31 NULL); 32 33 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 34 35 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 36 struct spdk_nvme_transport_id *trid), 0); 37 38 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 39 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 40 41 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 42 DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0); 43 44 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 45 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 51 52 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 53 int error_code, const char *msg)); 54 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 55 (struct spdk_jsonrpc_request *request), NULL); 56 DEFINE_STUB_V(spdk_jsonrpc_end_result, 57 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 58 59 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 60 size_t opts_size)); 61 62 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 63 size_t opts_size), 0); 64 DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL); 65 DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL); 66 67 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 68 69 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 70 enum spdk_bdev_reset_stat_mode mode)); 71 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 72 struct spdk_bdev_io_stat *add)); 73 74 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr)); 75 DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL); 76 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k)); 77 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL); 78 79 int 80 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 81 struct spdk_memory_domain **domains, int array_size) 82 { 83 int i, min_array_size; 84 85 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 86 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 87 for (i = 0; i < min_array_size; i++) { 88 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 89 } 90 } 91 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 92 93 return 0; 94 } 95 96 struct spdk_io_channel * 97 spdk_accel_get_io_channel(void) 98 { 99 return spdk_get_io_channel(g_accel_p); 100 } 101 102 void 103 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 104 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 105 { 106 /* Avoid warning that opts is used uninitialised */ 107 memset(opts, 0, opts_size); 108 } 109 110 #define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003" 111 112 static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN}; 113 114 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 115 (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts); 116 117 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 118 (const struct spdk_nvme_ctrlr *ctrlr), 0); 119 120 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 121 (struct spdk_nvme_ctrlr *ctrlr), NULL); 122 123 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 124 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 125 126 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 127 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 128 129 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 130 131 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 132 133 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 134 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 135 136 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 137 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 138 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 139 140 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 141 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 142 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 143 144 DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, ( 145 struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 146 struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf, 147 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 148 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 149 spdk_nvme_req_next_sge_cb next_sge_fn), 0); 150 151 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 152 size_t *size), 0); 153 154 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 155 156 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 157 158 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 159 160 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 161 162 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 163 164 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 165 166 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 167 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 168 169 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 170 171 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 172 char *name, size_t *size), 0); 173 174 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 175 (struct spdk_nvme_ns *ns), 0); 176 177 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 178 (const struct spdk_nvme_ctrlr *ctrlr), 0); 179 180 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 181 (struct spdk_nvme_ns *ns), 0); 182 183 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 184 (struct spdk_nvme_ns *ns), 0); 185 186 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 187 (struct spdk_nvme_ns *ns), 0); 188 189 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 190 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 191 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 192 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 193 194 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 195 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 196 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 197 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 198 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 199 200 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 201 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 202 void *payload, uint32_t payload_size, uint64_t slba, 203 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 204 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 205 206 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 207 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 208 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 209 210 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 211 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 212 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 213 214 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 215 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 216 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 217 218 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 219 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 220 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 221 222 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 223 224 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 225 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 226 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 227 228 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 229 (const struct spdk_nvme_status *status), NULL); 230 231 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 232 (const struct spdk_nvme_status *status), NULL); 233 234 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 235 236 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 237 238 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 239 240 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 241 242 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 243 244 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 245 struct iovec *iov, 246 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 247 DEFINE_STUB(spdk_accel_append_crc32c, int, 248 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst, 249 struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx, 250 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 251 DEFINE_STUB_V(spdk_accel_sequence_finish, 252 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 253 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 254 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 255 256 struct ut_nvme_req { 257 uint16_t opc; 258 spdk_nvme_cmd_cb cb_fn; 259 void *cb_arg; 260 struct spdk_nvme_cpl cpl; 261 TAILQ_ENTRY(ut_nvme_req) tailq; 262 }; 263 264 struct spdk_nvme_ns { 265 struct spdk_nvme_ctrlr *ctrlr; 266 uint32_t id; 267 bool is_active; 268 struct spdk_uuid *uuid; 269 enum spdk_nvme_ana_state ana_state; 270 enum spdk_nvme_csi csi; 271 }; 272 273 struct spdk_nvme_qpair { 274 struct spdk_nvme_ctrlr *ctrlr; 275 uint8_t failure_reason; 276 bool is_connected; 277 bool in_completion_context; 278 bool delete_after_completion_context; 279 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 280 uint32_t num_outstanding_reqs; 281 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 282 struct spdk_nvme_poll_group *poll_group; 283 void *poll_group_tailq_head; 284 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 285 }; 286 287 struct spdk_nvme_ctrlr { 288 uint32_t num_ns; 289 struct spdk_nvme_ns *ns; 290 struct spdk_nvme_ns_data *nsdata; 291 struct spdk_nvme_qpair adminq; 292 struct spdk_nvme_ctrlr_data cdata; 293 bool attached; 294 bool is_failed; 295 bool fail_reset; 296 bool is_removed; 297 struct spdk_nvme_transport_id trid; 298 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 299 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 300 struct spdk_nvme_ctrlr_opts opts; 301 }; 302 303 struct spdk_nvme_poll_group { 304 void *ctx; 305 struct spdk_nvme_accel_fn_table accel_fn_table; 306 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 307 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 308 }; 309 310 struct spdk_nvme_probe_ctx { 311 struct spdk_nvme_transport_id trid; 312 void *cb_ctx; 313 spdk_nvme_attach_cb attach_cb; 314 struct spdk_nvme_ctrlr *init_ctrlr; 315 }; 316 317 uint32_t 318 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 319 { 320 uint32_t nsid; 321 322 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 323 if (ctrlr->ns[nsid - 1].is_active) { 324 return nsid; 325 } 326 } 327 328 return 0; 329 } 330 331 uint32_t 332 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 333 { 334 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 335 if (ctrlr->ns[nsid - 1].is_active) { 336 return nsid; 337 } 338 } 339 340 return 0; 341 } 342 343 uint32_t 344 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 345 { 346 return qpair->num_outstanding_reqs; 347 } 348 349 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 350 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 351 g_ut_attached_ctrlrs); 352 static int g_ut_attach_ctrlr_status; 353 static size_t g_ut_attach_bdev_count; 354 static int g_ut_register_bdev_status; 355 static struct spdk_bdev *g_ut_registered_bdev; 356 static uint16_t g_ut_cntlid; 357 static struct nvme_path_id g_any_path = {}; 358 359 static void 360 ut_init_trid(struct spdk_nvme_transport_id *trid) 361 { 362 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 363 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 364 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 365 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 366 } 367 368 static void 369 ut_init_trid2(struct spdk_nvme_transport_id *trid) 370 { 371 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 372 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 373 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 374 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 375 } 376 377 static void 378 ut_init_trid3(struct spdk_nvme_transport_id *trid) 379 { 380 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 381 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 382 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 383 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 384 } 385 386 static int 387 cmp_int(int a, int b) 388 { 389 return a - b; 390 } 391 392 int 393 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 394 const struct spdk_nvme_transport_id *trid2) 395 { 396 int cmp; 397 398 /* We assume trtype is TCP for now. */ 399 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 400 401 cmp = cmp_int(trid1->trtype, trid2->trtype); 402 if (cmp) { 403 return cmp; 404 } 405 406 cmp = strcasecmp(trid1->traddr, trid2->traddr); 407 if (cmp) { 408 return cmp; 409 } 410 411 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 412 if (cmp) { 413 return cmp; 414 } 415 416 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 417 if (cmp) { 418 return cmp; 419 } 420 421 cmp = strcmp(trid1->subnqn, trid2->subnqn); 422 if (cmp) { 423 return cmp; 424 } 425 426 return 0; 427 } 428 429 static struct spdk_nvme_ctrlr * 430 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 431 bool ana_reporting, bool multipath) 432 { 433 struct spdk_nvme_ctrlr *ctrlr; 434 uint32_t i; 435 436 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 437 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 438 /* There is a ctrlr whose trid matches. */ 439 return NULL; 440 } 441 } 442 443 ctrlr = calloc(1, sizeof(*ctrlr)); 444 if (ctrlr == NULL) { 445 return NULL; 446 } 447 448 ctrlr->attached = true; 449 ctrlr->adminq.ctrlr = ctrlr; 450 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 451 ctrlr->adminq.is_connected = true; 452 453 if (num_ns != 0) { 454 ctrlr->num_ns = num_ns; 455 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 456 if (ctrlr->ns == NULL) { 457 free(ctrlr); 458 return NULL; 459 } 460 461 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 462 if (ctrlr->nsdata == NULL) { 463 free(ctrlr->ns); 464 free(ctrlr); 465 return NULL; 466 } 467 468 for (i = 0; i < num_ns; i++) { 469 ctrlr->ns[i].id = i + 1; 470 ctrlr->ns[i].ctrlr = ctrlr; 471 ctrlr->ns[i].is_active = true; 472 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 473 ctrlr->nsdata[i].nsze = 1024; 474 ctrlr->nsdata[i].nmic.can_share = multipath; 475 } 476 477 ctrlr->cdata.nn = num_ns; 478 ctrlr->cdata.mnan = num_ns; 479 ctrlr->cdata.nanagrpid = num_ns; 480 } 481 482 ctrlr->cdata.cntlid = ++g_ut_cntlid; 483 ctrlr->cdata.cmic.multi_ctrlr = multipath; 484 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 485 ctrlr->trid = *trid; 486 TAILQ_INIT(&ctrlr->active_io_qpairs); 487 488 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 489 490 return ctrlr; 491 } 492 493 static void 494 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 495 { 496 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 497 498 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 499 free(ctrlr->nsdata); 500 free(ctrlr->ns); 501 free(ctrlr); 502 } 503 504 static int 505 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 506 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 507 { 508 struct ut_nvme_req *req; 509 510 req = calloc(1, sizeof(*req)); 511 if (req == NULL) { 512 return -ENOMEM; 513 } 514 515 req->opc = opc; 516 req->cb_fn = cb_fn; 517 req->cb_arg = cb_arg; 518 519 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 520 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 521 522 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 523 qpair->num_outstanding_reqs++; 524 525 return 0; 526 } 527 528 static struct ut_nvme_req * 529 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 530 { 531 struct ut_nvme_req *req; 532 533 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 534 if (req->cb_arg == cb_arg) { 535 break; 536 } 537 } 538 539 return req; 540 } 541 542 static struct spdk_bdev_io * 543 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 544 struct spdk_io_channel *ch) 545 { 546 struct spdk_bdev_io *bdev_io; 547 548 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 549 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 550 bdev_io->type = type; 551 bdev_io->bdev = &nbdev->disk; 552 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 553 554 return bdev_io; 555 } 556 557 static void 558 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 559 { 560 bdev_io->u.bdev.iovs = &bdev_io->iov; 561 bdev_io->u.bdev.iovcnt = 1; 562 563 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 564 bdev_io->iov.iov_len = 4096; 565 } 566 567 static void 568 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 569 { 570 if (ctrlr->is_failed) { 571 free(ctrlr); 572 return; 573 } 574 575 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 576 if (probe_ctx->cb_ctx) { 577 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 578 } 579 580 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 581 582 if (probe_ctx->attach_cb) { 583 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 584 } 585 } 586 587 int 588 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 589 { 590 struct spdk_nvme_ctrlr *ctrlr, *tmp; 591 592 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 593 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 594 continue; 595 } 596 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 597 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 598 } 599 600 free(probe_ctx); 601 602 return 0; 603 } 604 605 struct spdk_nvme_probe_ctx * 606 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 607 const struct spdk_nvme_ctrlr_opts *opts, 608 spdk_nvme_attach_cb attach_cb) 609 { 610 struct spdk_nvme_probe_ctx *probe_ctx; 611 612 if (trid == NULL) { 613 return NULL; 614 } 615 616 probe_ctx = calloc(1, sizeof(*probe_ctx)); 617 if (probe_ctx == NULL) { 618 return NULL; 619 } 620 621 probe_ctx->trid = *trid; 622 probe_ctx->cb_ctx = (void *)opts; 623 probe_ctx->attach_cb = attach_cb; 624 625 return probe_ctx; 626 } 627 628 int 629 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 630 { 631 if (ctrlr->attached) { 632 ut_detach_ctrlr(ctrlr); 633 } 634 635 return 0; 636 } 637 638 int 639 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 640 { 641 SPDK_CU_ASSERT_FATAL(ctx != NULL); 642 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 643 644 return 0; 645 } 646 647 int 648 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 649 { 650 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 651 } 652 653 void 654 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 655 { 656 memset(opts, 0, opts_size); 657 658 snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN); 659 } 660 661 const struct spdk_nvme_ctrlr_data * 662 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 663 { 664 return &ctrlr->cdata; 665 } 666 667 uint32_t 668 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 669 { 670 return ctrlr->num_ns; 671 } 672 673 struct spdk_nvme_ns * 674 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 675 { 676 if (nsid < 1 || nsid > ctrlr->num_ns) { 677 return NULL; 678 } 679 680 return &ctrlr->ns[nsid - 1]; 681 } 682 683 bool 684 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 685 { 686 if (nsid < 1 || nsid > ctrlr->num_ns) { 687 return false; 688 } 689 690 return ctrlr->ns[nsid - 1].is_active; 691 } 692 693 union spdk_nvme_csts_register 694 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 695 { 696 union spdk_nvme_csts_register csts; 697 698 csts.raw = 0; 699 700 return csts; 701 } 702 703 union spdk_nvme_vs_register 704 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 705 { 706 union spdk_nvme_vs_register vs; 707 708 vs.raw = 0; 709 710 return vs; 711 } 712 713 struct spdk_nvme_qpair * 714 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 715 const struct spdk_nvme_io_qpair_opts *user_opts, 716 size_t opts_size) 717 { 718 struct spdk_nvme_qpair *qpair; 719 720 qpair = calloc(1, sizeof(*qpair)); 721 if (qpair == NULL) { 722 return NULL; 723 } 724 725 qpair->ctrlr = ctrlr; 726 TAILQ_INIT(&qpair->outstanding_reqs); 727 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 728 729 return qpair; 730 } 731 732 static void 733 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 734 { 735 struct spdk_nvme_poll_group *group = qpair->poll_group; 736 737 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 738 739 qpair->poll_group_tailq_head = &group->connected_qpairs; 740 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 741 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 742 } 743 744 static void 745 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 746 { 747 struct spdk_nvme_poll_group *group = qpair->poll_group; 748 749 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 750 751 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 752 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 753 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 754 } 755 756 int 757 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 758 struct spdk_nvme_qpair *qpair) 759 { 760 if (qpair->is_connected) { 761 return -EISCONN; 762 } 763 764 qpair->is_connected = true; 765 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 766 767 if (qpair->poll_group) { 768 nvme_poll_group_connect_qpair(qpair); 769 } 770 771 return 0; 772 } 773 774 void 775 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 776 { 777 if (!qpair->is_connected) { 778 return; 779 } 780 781 qpair->is_connected = false; 782 783 if (qpair->poll_group != NULL) { 784 nvme_poll_group_disconnect_qpair(qpair); 785 } 786 } 787 788 int 789 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 790 { 791 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 792 793 if (qpair->in_completion_context) { 794 qpair->delete_after_completion_context = true; 795 return 0; 796 } 797 798 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 799 800 if (qpair->poll_group != NULL) { 801 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 802 } 803 804 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 805 806 CU_ASSERT(qpair->num_outstanding_reqs == 0); 807 808 free(qpair); 809 810 return 0; 811 } 812 813 int 814 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 815 { 816 if (ctrlr->fail_reset) { 817 ctrlr->is_failed = true; 818 return -EIO; 819 } 820 821 ctrlr->adminq.is_connected = true; 822 return 0; 823 } 824 825 void 826 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 827 { 828 } 829 830 int 831 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 832 { 833 if (ctrlr->is_removed) { 834 return -ENXIO; 835 } 836 837 ctrlr->adminq.is_connected = false; 838 ctrlr->is_failed = false; 839 840 return 0; 841 } 842 843 void 844 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 845 { 846 ctrlr->is_failed = true; 847 } 848 849 bool 850 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 851 { 852 return ctrlr->is_failed; 853 } 854 855 spdk_nvme_qp_failure_reason 856 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 857 { 858 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 859 } 860 861 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 862 sizeof(uint32_t)) 863 static void 864 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 865 { 866 struct spdk_nvme_ana_page ana_hdr; 867 char _ana_desc[UT_ANA_DESC_SIZE]; 868 struct spdk_nvme_ana_group_descriptor *ana_desc; 869 struct spdk_nvme_ns *ns; 870 uint32_t i; 871 872 memset(&ana_hdr, 0, sizeof(ana_hdr)); 873 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 874 875 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 876 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 877 878 buf += sizeof(ana_hdr); 879 length -= sizeof(ana_hdr); 880 881 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 882 883 for (i = 0; i < ctrlr->num_ns; i++) { 884 ns = &ctrlr->ns[i]; 885 886 if (!ns->is_active) { 887 continue; 888 } 889 890 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 891 892 ana_desc->ana_group_id = ns->id; 893 ana_desc->num_of_nsid = 1; 894 ana_desc->ana_state = ns->ana_state; 895 ana_desc->nsid[0] = ns->id; 896 897 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 898 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 899 900 buf += UT_ANA_DESC_SIZE; 901 length -= UT_ANA_DESC_SIZE; 902 } 903 } 904 905 int 906 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 907 uint8_t log_page, uint32_t nsid, 908 void *payload, uint32_t payload_size, 909 uint64_t offset, 910 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 911 { 912 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 913 SPDK_CU_ASSERT_FATAL(offset == 0); 914 ut_create_ana_log_page(ctrlr, payload, payload_size); 915 } 916 917 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 918 cb_fn, cb_arg); 919 } 920 921 int 922 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 923 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 924 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 925 { 926 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 927 } 928 929 int 930 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 931 void *cmd_cb_arg, 932 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 933 { 934 struct ut_nvme_req *req = NULL, *abort_req; 935 936 if (qpair == NULL) { 937 qpair = &ctrlr->adminq; 938 } 939 940 abort_req = calloc(1, sizeof(*abort_req)); 941 if (abort_req == NULL) { 942 return -ENOMEM; 943 } 944 945 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 946 if (req->cb_arg == cmd_cb_arg) { 947 break; 948 } 949 } 950 951 if (req == NULL) { 952 free(abort_req); 953 return -ENOENT; 954 } 955 956 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 957 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 958 959 abort_req->opc = SPDK_NVME_OPC_ABORT; 960 abort_req->cb_fn = cb_fn; 961 abort_req->cb_arg = cb_arg; 962 963 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 964 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 965 abort_req->cpl.cdw0 = 0; 966 967 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 968 ctrlr->adminq.num_outstanding_reqs++; 969 970 return 0; 971 } 972 973 int32_t 974 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 975 { 976 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 977 } 978 979 uint32_t 980 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 981 { 982 return ns->id; 983 } 984 985 struct spdk_nvme_ctrlr * 986 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 987 { 988 return ns->ctrlr; 989 } 990 991 static inline struct spdk_nvme_ns_data * 992 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 993 { 994 return &ns->ctrlr->nsdata[ns->id - 1]; 995 } 996 997 const struct spdk_nvme_ns_data * 998 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 999 { 1000 return _nvme_ns_get_data(ns); 1001 } 1002 1003 uint64_t 1004 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 1005 { 1006 return _nvme_ns_get_data(ns)->nsze; 1007 } 1008 1009 const struct spdk_uuid * 1010 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 1011 { 1012 return ns->uuid; 1013 } 1014 1015 enum spdk_nvme_csi 1016 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 1017 return ns->csi; 1018 } 1019 1020 int 1021 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1022 void *metadata, uint64_t lba, uint32_t lba_count, 1023 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1024 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1025 { 1026 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1027 } 1028 1029 int 1030 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1031 void *buffer, void *metadata, uint64_t lba, 1032 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1033 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1034 { 1035 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1036 } 1037 1038 int 1039 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1040 uint64_t lba, uint32_t lba_count, 1041 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1042 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1043 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1044 uint16_t apptag_mask, uint16_t apptag) 1045 { 1046 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1047 } 1048 1049 int 1050 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1051 uint64_t lba, uint32_t lba_count, 1052 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1053 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1054 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1055 uint16_t apptag_mask, uint16_t apptag) 1056 { 1057 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1058 } 1059 1060 static bool g_ut_readv_ext_called; 1061 int 1062 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1063 uint64_t lba, uint32_t lba_count, 1064 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1065 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1066 spdk_nvme_req_next_sge_cb next_sge_fn, 1067 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1068 { 1069 g_ut_readv_ext_called = true; 1070 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1071 } 1072 1073 static bool g_ut_read_ext_called; 1074 int 1075 spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1076 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1077 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1078 { 1079 g_ut_read_ext_called = true; 1080 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1081 } 1082 1083 static bool g_ut_writev_ext_called; 1084 int 1085 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1086 uint64_t lba, uint32_t lba_count, 1087 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1088 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1089 spdk_nvme_req_next_sge_cb next_sge_fn, 1090 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1091 { 1092 g_ut_writev_ext_called = true; 1093 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1094 } 1095 1096 static bool g_ut_write_ext_called; 1097 int 1098 spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1099 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1100 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1101 { 1102 g_ut_write_ext_called = true; 1103 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1104 } 1105 1106 int 1107 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1108 uint64_t lba, uint32_t lba_count, 1109 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1110 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1111 spdk_nvme_req_next_sge_cb next_sge_fn, 1112 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1113 { 1114 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1115 } 1116 1117 int 1118 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1119 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1120 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1121 { 1122 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1123 } 1124 1125 int 1126 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1127 uint64_t lba, uint32_t lba_count, 1128 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1129 uint32_t io_flags) 1130 { 1131 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1132 } 1133 1134 int 1135 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1136 const struct spdk_nvme_scc_source_range *ranges, 1137 uint16_t num_ranges, uint64_t dest_lba, 1138 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1139 { 1140 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1141 } 1142 1143 struct spdk_nvme_poll_group * 1144 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1145 { 1146 struct spdk_nvme_poll_group *group; 1147 1148 group = calloc(1, sizeof(*group)); 1149 if (group == NULL) { 1150 return NULL; 1151 } 1152 1153 group->ctx = ctx; 1154 if (table != NULL) { 1155 group->accel_fn_table = *table; 1156 } 1157 TAILQ_INIT(&group->connected_qpairs); 1158 TAILQ_INIT(&group->disconnected_qpairs); 1159 1160 return group; 1161 } 1162 1163 int 1164 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1165 { 1166 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1167 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1168 return -EBUSY; 1169 } 1170 1171 free(group); 1172 1173 return 0; 1174 } 1175 1176 spdk_nvme_qp_failure_reason 1177 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1178 { 1179 return qpair->failure_reason; 1180 } 1181 1182 bool 1183 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair) 1184 { 1185 return qpair->is_connected; 1186 } 1187 1188 int32_t 1189 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1190 uint32_t max_completions) 1191 { 1192 struct ut_nvme_req *req, *tmp; 1193 uint32_t num_completions = 0; 1194 1195 if (!qpair->is_connected) { 1196 return -ENXIO; 1197 } 1198 1199 qpair->in_completion_context = true; 1200 1201 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1202 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1203 qpair->num_outstanding_reqs--; 1204 1205 req->cb_fn(req->cb_arg, &req->cpl); 1206 1207 free(req); 1208 num_completions++; 1209 } 1210 1211 qpair->in_completion_context = false; 1212 if (qpair->delete_after_completion_context) { 1213 spdk_nvme_ctrlr_free_io_qpair(qpair); 1214 } 1215 1216 return num_completions; 1217 } 1218 1219 int64_t 1220 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1221 uint32_t completions_per_qpair, 1222 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1223 { 1224 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1225 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1226 1227 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1228 1229 if (disconnected_qpair_cb == NULL) { 1230 return -EINVAL; 1231 } 1232 1233 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1234 disconnected_qpair_cb(qpair, group->ctx); 1235 } 1236 1237 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1238 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1239 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1240 /* Bump the number of completions so this counts as "busy" */ 1241 num_completions++; 1242 continue; 1243 } 1244 1245 local_completions = spdk_nvme_qpair_process_completions(qpair, 1246 completions_per_qpair); 1247 if (local_completions < 0 && error_reason == 0) { 1248 error_reason = local_completions; 1249 } else { 1250 num_completions += local_completions; 1251 assert(num_completions >= 0); 1252 } 1253 } 1254 1255 return error_reason ? error_reason : num_completions; 1256 } 1257 1258 int 1259 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1260 struct spdk_nvme_qpair *qpair) 1261 { 1262 CU_ASSERT(!qpair->is_connected); 1263 1264 qpair->poll_group = group; 1265 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1266 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1267 1268 return 0; 1269 } 1270 1271 int 1272 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1273 struct spdk_nvme_qpair *qpair) 1274 { 1275 CU_ASSERT(!qpair->is_connected); 1276 1277 if (qpair->poll_group == NULL) { 1278 return -ENOENT; 1279 } 1280 1281 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1282 1283 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1284 1285 qpair->poll_group = NULL; 1286 qpair->poll_group_tailq_head = NULL; 1287 1288 return 0; 1289 } 1290 1291 int 1292 spdk_bdev_register(struct spdk_bdev *bdev) 1293 { 1294 g_ut_registered_bdev = bdev; 1295 1296 return g_ut_register_bdev_status; 1297 } 1298 1299 void 1300 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1301 { 1302 int rc; 1303 1304 rc = bdev->fn_table->destruct(bdev->ctxt); 1305 1306 if (bdev == g_ut_registered_bdev) { 1307 g_ut_registered_bdev = NULL; 1308 } 1309 1310 if (rc <= 0 && cb_fn != NULL) { 1311 cb_fn(cb_arg, rc); 1312 } 1313 } 1314 1315 int 1316 spdk_bdev_open_ext(const char *bdev_name, bool write, 1317 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1318 struct spdk_bdev_desc **desc) 1319 { 1320 if (g_ut_registered_bdev == NULL || 1321 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1322 return -ENODEV; 1323 } 1324 1325 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1326 1327 return 0; 1328 } 1329 1330 struct spdk_bdev * 1331 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1332 { 1333 return (struct spdk_bdev *)desc; 1334 } 1335 1336 int 1337 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1338 { 1339 bdev->blockcnt = size; 1340 1341 return 0; 1342 } 1343 1344 struct spdk_io_channel * 1345 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1346 { 1347 return (struct spdk_io_channel *)bdev_io->internal.ch; 1348 } 1349 1350 struct spdk_thread * 1351 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io) 1352 { 1353 return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io)); 1354 } 1355 1356 void 1357 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1358 { 1359 bdev_io->internal.status = status; 1360 bdev_io->internal.in_submit_request = false; 1361 } 1362 1363 void 1364 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1365 { 1366 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1367 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1368 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1369 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1370 } else { 1371 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1372 } 1373 1374 bdev_io->internal.error.nvme.cdw0 = cdw0; 1375 bdev_io->internal.error.nvme.sct = sct; 1376 bdev_io->internal.error.nvme.sc = sc; 1377 1378 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1379 } 1380 1381 void 1382 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1383 { 1384 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1385 1386 ut_bdev_io_set_buf(bdev_io); 1387 1388 cb(ch, bdev_io, true); 1389 } 1390 1391 static void 1392 test_create_ctrlr(void) 1393 { 1394 struct spdk_nvme_transport_id trid = {}; 1395 struct spdk_nvme_ctrlr ctrlr = {}; 1396 int rc; 1397 1398 ut_init_trid(&trid); 1399 1400 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1401 CU_ASSERT(rc == 0); 1402 1403 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1404 1405 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1406 CU_ASSERT(rc == 0); 1407 1408 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1409 1410 poll_threads(); 1411 spdk_delay_us(1000); 1412 poll_threads(); 1413 1414 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1415 } 1416 1417 static void 1418 ut_check_hotplug_on_reset(void *cb_arg, int rc) 1419 { 1420 bool *detect_remove = cb_arg; 1421 1422 CU_ASSERT(rc != 0); 1423 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1424 1425 *detect_remove = true; 1426 } 1427 1428 static void 1429 test_reset_ctrlr(void) 1430 { 1431 struct spdk_nvme_transport_id trid = {}; 1432 struct spdk_nvme_ctrlr ctrlr = {}; 1433 struct nvme_ctrlr *nvme_ctrlr = NULL; 1434 struct nvme_path_id *curr_trid; 1435 struct spdk_io_channel *ch1, *ch2; 1436 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1437 bool detect_remove; 1438 int rc; 1439 1440 ut_init_trid(&trid); 1441 TAILQ_INIT(&ctrlr.active_io_qpairs); 1442 1443 set_thread(0); 1444 1445 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1446 CU_ASSERT(rc == 0); 1447 1448 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1449 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1450 1451 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1452 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1453 1454 ch1 = spdk_get_io_channel(nvme_ctrlr); 1455 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1456 1457 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1458 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1459 1460 set_thread(1); 1461 1462 ch2 = spdk_get_io_channel(nvme_ctrlr); 1463 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1464 1465 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1466 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1467 1468 /* Reset starts from thread 1. */ 1469 set_thread(1); 1470 1471 /* Case 1: ctrlr is already being destructed. */ 1472 nvme_ctrlr->destruct = true; 1473 1474 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1475 CU_ASSERT(rc == -ENXIO); 1476 1477 /* Case 2: reset is in progress. */ 1478 nvme_ctrlr->destruct = false; 1479 nvme_ctrlr->resetting = true; 1480 1481 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1482 CU_ASSERT(rc == -EBUSY); 1483 1484 /* Case 3: reset completes successfully. */ 1485 nvme_ctrlr->resetting = false; 1486 curr_trid->last_failed_tsc = spdk_get_ticks(); 1487 ctrlr.is_failed = true; 1488 1489 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1490 CU_ASSERT(rc == 0); 1491 CU_ASSERT(nvme_ctrlr->resetting == true); 1492 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1493 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1494 1495 poll_thread_times(0, 3); 1496 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1497 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1498 1499 poll_thread_times(0, 1); 1500 poll_thread_times(1, 1); 1501 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1502 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1503 CU_ASSERT(ctrlr.is_failed == true); 1504 1505 poll_thread_times(1, 1); 1506 poll_thread_times(0, 1); 1507 CU_ASSERT(ctrlr.is_failed == false); 1508 CU_ASSERT(ctrlr.adminq.is_connected == false); 1509 1510 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1511 poll_thread_times(0, 2); 1512 CU_ASSERT(ctrlr.adminq.is_connected == true); 1513 1514 poll_thread_times(0, 1); 1515 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1516 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1517 1518 poll_thread_times(1, 1); 1519 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1520 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1521 CU_ASSERT(nvme_ctrlr->resetting == true); 1522 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1523 1524 poll_thread_times(0, 2); 1525 CU_ASSERT(nvme_ctrlr->resetting == true); 1526 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1527 poll_thread_times(1, 1); 1528 CU_ASSERT(nvme_ctrlr->resetting == true); 1529 poll_thread_times(0, 1); 1530 CU_ASSERT(nvme_ctrlr->resetting == false); 1531 1532 /* Case 4: ctrlr is already removed. */ 1533 ctrlr.is_removed = true; 1534 1535 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1536 CU_ASSERT(rc == 0); 1537 1538 detect_remove = false; 1539 nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset; 1540 nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove; 1541 1542 poll_threads(); 1543 1544 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL); 1545 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL); 1546 CU_ASSERT(detect_remove == true); 1547 1548 ctrlr.is_removed = false; 1549 1550 spdk_put_io_channel(ch2); 1551 1552 set_thread(0); 1553 1554 spdk_put_io_channel(ch1); 1555 1556 poll_threads(); 1557 1558 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1559 CU_ASSERT(rc == 0); 1560 1561 poll_threads(); 1562 spdk_delay_us(1000); 1563 poll_threads(); 1564 1565 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1566 } 1567 1568 static void 1569 test_race_between_reset_and_destruct_ctrlr(void) 1570 { 1571 struct spdk_nvme_transport_id trid = {}; 1572 struct spdk_nvme_ctrlr ctrlr = {}; 1573 struct nvme_ctrlr *nvme_ctrlr; 1574 struct spdk_io_channel *ch1, *ch2; 1575 int rc; 1576 1577 ut_init_trid(&trid); 1578 TAILQ_INIT(&ctrlr.active_io_qpairs); 1579 1580 set_thread(0); 1581 1582 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1583 CU_ASSERT(rc == 0); 1584 1585 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1586 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1587 1588 ch1 = spdk_get_io_channel(nvme_ctrlr); 1589 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1590 1591 set_thread(1); 1592 1593 ch2 = spdk_get_io_channel(nvme_ctrlr); 1594 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1595 1596 /* Reset starts from thread 1. */ 1597 set_thread(1); 1598 1599 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1600 CU_ASSERT(rc == 0); 1601 CU_ASSERT(nvme_ctrlr->resetting == true); 1602 1603 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1604 set_thread(0); 1605 1606 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1607 CU_ASSERT(rc == 0); 1608 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1609 CU_ASSERT(nvme_ctrlr->destruct == true); 1610 CU_ASSERT(nvme_ctrlr->resetting == true); 1611 1612 poll_threads(); 1613 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1614 poll_threads(); 1615 1616 /* Reset completed but ctrlr is not still destructed yet. */ 1617 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1618 CU_ASSERT(nvme_ctrlr->destruct == true); 1619 CU_ASSERT(nvme_ctrlr->resetting == false); 1620 1621 /* New reset request is rejected. */ 1622 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1623 CU_ASSERT(rc == -ENXIO); 1624 1625 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1626 * However there are two channels and destruct is not completed yet. 1627 */ 1628 poll_threads(); 1629 1630 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1631 1632 set_thread(0); 1633 1634 spdk_put_io_channel(ch1); 1635 1636 set_thread(1); 1637 1638 spdk_put_io_channel(ch2); 1639 1640 poll_threads(); 1641 spdk_delay_us(1000); 1642 poll_threads(); 1643 1644 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1645 } 1646 1647 static void 1648 test_failover_ctrlr(void) 1649 { 1650 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1651 struct spdk_nvme_ctrlr ctrlr = {}; 1652 struct nvme_ctrlr *nvme_ctrlr = NULL; 1653 struct nvme_path_id *curr_trid, *next_trid; 1654 struct spdk_io_channel *ch1, *ch2; 1655 int rc; 1656 1657 ut_init_trid(&trid1); 1658 ut_init_trid2(&trid2); 1659 TAILQ_INIT(&ctrlr.active_io_qpairs); 1660 1661 set_thread(0); 1662 1663 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1664 CU_ASSERT(rc == 0); 1665 1666 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1667 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1668 1669 ch1 = spdk_get_io_channel(nvme_ctrlr); 1670 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1671 1672 set_thread(1); 1673 1674 ch2 = spdk_get_io_channel(nvme_ctrlr); 1675 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1676 1677 /* First, test one trid case. */ 1678 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1679 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1680 1681 /* Failover starts from thread 1. */ 1682 set_thread(1); 1683 1684 /* Case 1: ctrlr is already being destructed. */ 1685 nvme_ctrlr->destruct = true; 1686 1687 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1688 CU_ASSERT(rc == -ENXIO); 1689 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1690 1691 /* Case 2: reset is in progress. */ 1692 nvme_ctrlr->destruct = false; 1693 nvme_ctrlr->resetting = true; 1694 1695 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1696 CU_ASSERT(rc == -EINPROGRESS); 1697 1698 /* Case 3: reset completes successfully. */ 1699 nvme_ctrlr->resetting = false; 1700 1701 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1702 CU_ASSERT(rc == 0); 1703 1704 CU_ASSERT(nvme_ctrlr->resetting == true); 1705 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1706 1707 poll_threads(); 1708 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1709 poll_threads(); 1710 1711 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1712 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1713 1714 CU_ASSERT(nvme_ctrlr->resetting == false); 1715 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1716 1717 set_thread(0); 1718 1719 /* Second, test two trids case. */ 1720 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1721 CU_ASSERT(rc == 0); 1722 1723 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1724 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1725 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1726 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1727 1728 /* Failover starts from thread 1. */ 1729 set_thread(1); 1730 1731 /* Case 4: reset is in progress. */ 1732 nvme_ctrlr->resetting = true; 1733 1734 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1735 CU_ASSERT(rc == -EINPROGRESS); 1736 1737 /* Case 5: failover completes successfully. */ 1738 nvme_ctrlr->resetting = false; 1739 1740 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1741 CU_ASSERT(rc == 0); 1742 1743 CU_ASSERT(nvme_ctrlr->resetting == true); 1744 1745 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1746 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1747 CU_ASSERT(next_trid != curr_trid); 1748 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1749 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1750 1751 poll_threads(); 1752 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1753 poll_threads(); 1754 1755 CU_ASSERT(nvme_ctrlr->resetting == false); 1756 1757 spdk_put_io_channel(ch2); 1758 1759 set_thread(0); 1760 1761 spdk_put_io_channel(ch1); 1762 1763 poll_threads(); 1764 1765 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1766 CU_ASSERT(rc == 0); 1767 1768 poll_threads(); 1769 spdk_delay_us(1000); 1770 poll_threads(); 1771 1772 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1773 } 1774 1775 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1776 * 1777 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1778 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1779 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1780 * have been active, i.e., the head of the list until the failover completed. 1781 * However trid3 was inserted to the head of the list by mistake. 1782 * 1783 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1784 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1785 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1786 * may be executed repeatedly before failover is executed. Hence this bug is real. 1787 * 1788 * The following test verifies the fix. 1789 */ 1790 static void 1791 test_race_between_failover_and_add_secondary_trid(void) 1792 { 1793 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1794 struct spdk_nvme_ctrlr ctrlr = {}; 1795 struct nvme_ctrlr *nvme_ctrlr = NULL; 1796 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1797 struct spdk_io_channel *ch1, *ch2; 1798 int rc; 1799 1800 ut_init_trid(&trid1); 1801 ut_init_trid2(&trid2); 1802 ut_init_trid3(&trid3); 1803 TAILQ_INIT(&ctrlr.active_io_qpairs); 1804 1805 set_thread(0); 1806 1807 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1808 CU_ASSERT(rc == 0); 1809 1810 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1811 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1812 1813 ch1 = spdk_get_io_channel(nvme_ctrlr); 1814 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1815 1816 set_thread(1); 1817 1818 ch2 = spdk_get_io_channel(nvme_ctrlr); 1819 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1820 1821 set_thread(0); 1822 1823 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1824 CU_ASSERT(rc == 0); 1825 1826 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1827 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1828 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1829 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1830 path_id2 = TAILQ_NEXT(path_id1, link); 1831 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1832 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1833 1834 ctrlr.fail_reset = true; 1835 1836 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1837 CU_ASSERT(rc == 0); 1838 1839 poll_threads(); 1840 1841 CU_ASSERT(path_id1->last_failed_tsc != 0); 1842 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1843 1844 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1845 CU_ASSERT(rc == 0); 1846 1847 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1848 CU_ASSERT(rc == 0); 1849 1850 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1851 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1852 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1853 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1854 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1855 path_id3 = TAILQ_NEXT(path_id2, link); 1856 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1857 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1858 1859 poll_threads(); 1860 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1861 poll_threads(); 1862 1863 spdk_put_io_channel(ch1); 1864 1865 set_thread(1); 1866 1867 spdk_put_io_channel(ch2); 1868 1869 poll_threads(); 1870 1871 set_thread(0); 1872 1873 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1874 CU_ASSERT(rc == 0); 1875 1876 poll_threads(); 1877 spdk_delay_us(1000); 1878 poll_threads(); 1879 1880 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1881 } 1882 1883 static void 1884 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1885 { 1886 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1887 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1888 } 1889 1890 static void 1891 test_pending_reset(void) 1892 { 1893 struct spdk_nvme_transport_id trid = {}; 1894 struct spdk_nvme_ctrlr *ctrlr; 1895 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 1896 struct nvme_ctrlr *nvme_ctrlr = NULL; 1897 const int STRING_SIZE = 32; 1898 const char *attached_names[STRING_SIZE]; 1899 struct nvme_bdev *bdev; 1900 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1901 struct spdk_io_channel *ch1, *ch2; 1902 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1903 struct nvme_io_path *io_path1, *io_path2; 1904 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1905 int rc; 1906 1907 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1908 ut_init_trid(&trid); 1909 1910 set_thread(0); 1911 1912 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1913 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1914 1915 g_ut_attach_ctrlr_status = 0; 1916 g_ut_attach_bdev_count = 1; 1917 1918 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1919 attach_ctrlr_done, NULL, &opts, NULL, false); 1920 CU_ASSERT(rc == 0); 1921 1922 spdk_delay_us(1000); 1923 poll_threads(); 1924 1925 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1926 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1927 1928 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1929 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1930 1931 ch1 = spdk_get_io_channel(bdev); 1932 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1933 1934 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1935 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1936 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1937 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1938 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1939 1940 set_thread(1); 1941 1942 ch2 = spdk_get_io_channel(bdev); 1943 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1944 1945 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1946 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1947 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1948 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1949 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1950 1951 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1952 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1953 1954 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1955 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1956 1957 /* The first reset request is submitted on thread 1, and the second reset request 1958 * is submitted on thread 0 while processing the first request. 1959 */ 1960 bdev_nvme_submit_request(ch2, first_bdev_io); 1961 CU_ASSERT(nvme_ctrlr->resetting == true); 1962 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1963 1964 set_thread(0); 1965 1966 bdev_nvme_submit_request(ch1, second_bdev_io); 1967 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1968 1969 poll_threads(); 1970 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1971 poll_threads(); 1972 1973 CU_ASSERT(nvme_ctrlr->resetting == false); 1974 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1975 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1976 1977 /* The first reset request is submitted on thread 1, and the second reset request 1978 * is submitted on thread 0 while processing the first request. 1979 * 1980 * The difference from the above scenario is that the controller is removed while 1981 * processing the first request. Hence both reset requests should fail. 1982 */ 1983 set_thread(1); 1984 1985 bdev_nvme_submit_request(ch2, first_bdev_io); 1986 CU_ASSERT(nvme_ctrlr->resetting == true); 1987 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1988 1989 set_thread(0); 1990 1991 bdev_nvme_submit_request(ch1, second_bdev_io); 1992 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1993 1994 ctrlr->fail_reset = true; 1995 1996 poll_threads(); 1997 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1998 poll_threads(); 1999 2000 CU_ASSERT(nvme_ctrlr->resetting == false); 2001 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2002 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2003 2004 spdk_put_io_channel(ch1); 2005 2006 set_thread(1); 2007 2008 spdk_put_io_channel(ch2); 2009 2010 poll_threads(); 2011 2012 set_thread(0); 2013 2014 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2015 CU_ASSERT(rc == 0); 2016 2017 poll_threads(); 2018 spdk_delay_us(1000); 2019 poll_threads(); 2020 2021 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2022 2023 free(first_bdev_io); 2024 free(second_bdev_io); 2025 } 2026 2027 static void 2028 test_attach_ctrlr(void) 2029 { 2030 struct spdk_nvme_transport_id trid = {}; 2031 struct spdk_nvme_ctrlr *ctrlr; 2032 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2033 struct nvme_ctrlr *nvme_ctrlr; 2034 const int STRING_SIZE = 32; 2035 const char *attached_names[STRING_SIZE]; 2036 struct nvme_bdev *nbdev; 2037 int rc; 2038 2039 set_thread(0); 2040 2041 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2042 ut_init_trid(&trid); 2043 2044 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2045 * by probe polling. 2046 */ 2047 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2048 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2049 2050 ctrlr->is_failed = true; 2051 g_ut_attach_ctrlr_status = -EIO; 2052 g_ut_attach_bdev_count = 0; 2053 2054 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2055 attach_ctrlr_done, NULL, &opts, NULL, false); 2056 CU_ASSERT(rc == 0); 2057 2058 spdk_delay_us(1000); 2059 poll_threads(); 2060 2061 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2062 2063 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2064 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2065 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2066 2067 g_ut_attach_ctrlr_status = 0; 2068 2069 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2070 attach_ctrlr_done, NULL, &opts, NULL, false); 2071 CU_ASSERT(rc == 0); 2072 2073 spdk_delay_us(1000); 2074 poll_threads(); 2075 2076 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2077 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2078 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2079 2080 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2081 CU_ASSERT(rc == 0); 2082 2083 poll_threads(); 2084 spdk_delay_us(1000); 2085 poll_threads(); 2086 2087 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2088 2089 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2090 * one nvme_bdev is created. 2091 */ 2092 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2093 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2094 2095 g_ut_attach_bdev_count = 1; 2096 2097 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2098 attach_ctrlr_done, NULL, &opts, NULL, false); 2099 CU_ASSERT(rc == 0); 2100 2101 spdk_delay_us(1000); 2102 poll_threads(); 2103 2104 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2105 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2106 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2107 2108 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2109 attached_names[0] = NULL; 2110 2111 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2112 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2113 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2114 2115 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2116 CU_ASSERT(rc == 0); 2117 2118 poll_threads(); 2119 spdk_delay_us(1000); 2120 poll_threads(); 2121 2122 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2123 2124 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2125 * created because creating one nvme_bdev failed. 2126 */ 2127 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2128 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2129 2130 g_ut_register_bdev_status = -EINVAL; 2131 g_ut_attach_bdev_count = 0; 2132 2133 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2134 attach_ctrlr_done, NULL, &opts, NULL, false); 2135 CU_ASSERT(rc == 0); 2136 2137 spdk_delay_us(1000); 2138 poll_threads(); 2139 2140 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2141 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2142 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2143 2144 CU_ASSERT(attached_names[0] == NULL); 2145 2146 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2147 CU_ASSERT(rc == 0); 2148 2149 poll_threads(); 2150 spdk_delay_us(1000); 2151 poll_threads(); 2152 2153 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2154 2155 g_ut_register_bdev_status = 0; 2156 } 2157 2158 static void 2159 test_aer_cb(void) 2160 { 2161 struct spdk_nvme_transport_id trid = {}; 2162 struct spdk_nvme_ctrlr *ctrlr; 2163 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2164 struct nvme_ctrlr *nvme_ctrlr; 2165 struct nvme_bdev *bdev; 2166 const int STRING_SIZE = 32; 2167 const char *attached_names[STRING_SIZE]; 2168 union spdk_nvme_async_event_completion event = {}; 2169 struct spdk_nvme_cpl cpl = {}; 2170 int rc; 2171 2172 set_thread(0); 2173 2174 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2175 ut_init_trid(&trid); 2176 2177 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2178 * namespaces are populated. 2179 */ 2180 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2181 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2182 2183 ctrlr->ns[0].is_active = false; 2184 2185 g_ut_attach_ctrlr_status = 0; 2186 g_ut_attach_bdev_count = 3; 2187 2188 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2189 attach_ctrlr_done, NULL, &opts, NULL, false); 2190 CU_ASSERT(rc == 0); 2191 2192 spdk_delay_us(1000); 2193 poll_threads(); 2194 2195 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2196 poll_threads(); 2197 2198 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2199 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2200 2201 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2202 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2203 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2204 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2205 2206 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2207 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2208 CU_ASSERT(bdev->disk.blockcnt == 1024); 2209 2210 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2211 * change the size of the 4th namespace. 2212 */ 2213 ctrlr->ns[0].is_active = true; 2214 ctrlr->ns[2].is_active = false; 2215 ctrlr->nsdata[3].nsze = 2048; 2216 2217 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2218 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2219 cpl.cdw0 = event.raw; 2220 2221 aer_cb(nvme_ctrlr, &cpl); 2222 2223 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2224 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2225 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2226 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2227 CU_ASSERT(bdev->disk.blockcnt == 2048); 2228 2229 /* Change ANA state of active namespaces. */ 2230 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2231 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2232 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2233 2234 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2235 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2236 cpl.cdw0 = event.raw; 2237 2238 aer_cb(nvme_ctrlr, &cpl); 2239 2240 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2241 poll_threads(); 2242 2243 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2244 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2245 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2246 2247 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2248 CU_ASSERT(rc == 0); 2249 2250 poll_threads(); 2251 spdk_delay_us(1000); 2252 poll_threads(); 2253 2254 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2255 } 2256 2257 static void 2258 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2259 enum spdk_bdev_io_type io_type) 2260 { 2261 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2262 struct nvme_io_path *io_path; 2263 struct spdk_nvme_qpair *qpair; 2264 2265 io_path = bdev_nvme_find_io_path(nbdev_ch); 2266 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2267 qpair = io_path->qpair->qpair; 2268 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2269 2270 bdev_io->type = io_type; 2271 bdev_io->internal.in_submit_request = true; 2272 2273 bdev_nvme_submit_request(ch, bdev_io); 2274 2275 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2276 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2277 2278 poll_threads(); 2279 2280 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2281 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2282 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2283 } 2284 2285 static void 2286 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2287 enum spdk_bdev_io_type io_type) 2288 { 2289 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2290 struct nvme_io_path *io_path; 2291 struct spdk_nvme_qpair *qpair; 2292 2293 io_path = bdev_nvme_find_io_path(nbdev_ch); 2294 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2295 qpair = io_path->qpair->qpair; 2296 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2297 2298 bdev_io->type = io_type; 2299 bdev_io->internal.in_submit_request = true; 2300 2301 bdev_nvme_submit_request(ch, bdev_io); 2302 2303 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2304 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2305 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2306 } 2307 2308 static void 2309 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2310 { 2311 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2312 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2313 struct ut_nvme_req *req; 2314 struct nvme_io_path *io_path; 2315 struct spdk_nvme_qpair *qpair; 2316 2317 io_path = bdev_nvme_find_io_path(nbdev_ch); 2318 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2319 qpair = io_path->qpair->qpair; 2320 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2321 2322 /* Only compare and write now. */ 2323 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2324 bdev_io->internal.in_submit_request = true; 2325 2326 bdev_nvme_submit_request(ch, bdev_io); 2327 2328 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2329 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2330 CU_ASSERT(bio->first_fused_submitted == true); 2331 2332 /* First outstanding request is compare operation. */ 2333 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2334 SPDK_CU_ASSERT_FATAL(req != NULL); 2335 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2336 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2337 2338 poll_threads(); 2339 2340 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2341 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2342 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2343 } 2344 2345 static void 2346 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2347 struct spdk_nvme_ctrlr *ctrlr) 2348 { 2349 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2350 bdev_io->internal.in_submit_request = true; 2351 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2352 2353 bdev_nvme_submit_request(ch, bdev_io); 2354 2355 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2356 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2357 2358 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2359 poll_thread_times(1, 1); 2360 2361 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2362 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2363 2364 poll_thread_times(0, 1); 2365 2366 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2367 } 2368 2369 static void 2370 test_submit_nvme_cmd(void) 2371 { 2372 struct spdk_nvme_transport_id trid = {}; 2373 struct spdk_nvme_ctrlr *ctrlr; 2374 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2375 struct nvme_ctrlr *nvme_ctrlr; 2376 const int STRING_SIZE = 32; 2377 const char *attached_names[STRING_SIZE]; 2378 struct nvme_bdev *bdev; 2379 struct spdk_bdev_io *bdev_io; 2380 struct spdk_io_channel *ch; 2381 int rc; 2382 2383 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2384 ut_init_trid(&trid); 2385 2386 set_thread(1); 2387 2388 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2389 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2390 2391 g_ut_attach_ctrlr_status = 0; 2392 g_ut_attach_bdev_count = 1; 2393 2394 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2395 attach_ctrlr_done, NULL, &opts, NULL, false); 2396 CU_ASSERT(rc == 0); 2397 2398 spdk_delay_us(1000); 2399 poll_threads(); 2400 2401 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2402 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2403 2404 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2405 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2406 2407 set_thread(0); 2408 2409 ch = spdk_get_io_channel(bdev); 2410 SPDK_CU_ASSERT_FATAL(ch != NULL); 2411 2412 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2413 2414 bdev_io->u.bdev.iovs = NULL; 2415 2416 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2417 2418 ut_bdev_io_set_buf(bdev_io); 2419 2420 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2421 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2422 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2423 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2424 2425 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2426 2427 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2428 2429 /* Verify that ext NVME API is called when data is described by memory domain */ 2430 g_ut_read_ext_called = false; 2431 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2432 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2433 CU_ASSERT(g_ut_read_ext_called == true); 2434 g_ut_read_ext_called = false; 2435 bdev_io->u.bdev.memory_domain = NULL; 2436 2437 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2438 2439 free(bdev_io); 2440 2441 spdk_put_io_channel(ch); 2442 2443 poll_threads(); 2444 2445 set_thread(1); 2446 2447 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2448 CU_ASSERT(rc == 0); 2449 2450 poll_threads(); 2451 spdk_delay_us(1000); 2452 poll_threads(); 2453 2454 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2455 } 2456 2457 static void 2458 test_add_remove_trid(void) 2459 { 2460 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2461 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2462 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2463 struct nvme_ctrlr *nvme_ctrlr = NULL; 2464 const int STRING_SIZE = 32; 2465 const char *attached_names[STRING_SIZE]; 2466 struct nvme_path_id *ctrid; 2467 int rc; 2468 2469 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2470 ut_init_trid(&path1.trid); 2471 ut_init_trid2(&path2.trid); 2472 ut_init_trid3(&path3.trid); 2473 2474 set_thread(0); 2475 2476 g_ut_attach_ctrlr_status = 0; 2477 g_ut_attach_bdev_count = 0; 2478 2479 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2480 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2481 2482 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2483 attach_ctrlr_done, NULL, &opts, NULL, false); 2484 CU_ASSERT(rc == 0); 2485 2486 spdk_delay_us(1000); 2487 poll_threads(); 2488 2489 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2490 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2491 2492 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2493 2494 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2495 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2496 2497 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2498 attach_ctrlr_done, NULL, &opts, NULL, false); 2499 CU_ASSERT(rc == 0); 2500 2501 spdk_delay_us(1000); 2502 poll_threads(); 2503 2504 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2505 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2506 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2507 break; 2508 } 2509 } 2510 CU_ASSERT(ctrid != NULL); 2511 2512 /* trid3 is not in the registered list. */ 2513 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2514 CU_ASSERT(rc == -ENXIO); 2515 2516 /* trid2 is not used, and simply removed. */ 2517 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2518 CU_ASSERT(rc == 0); 2519 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2520 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2521 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2522 } 2523 2524 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2525 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2526 2527 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2528 attach_ctrlr_done, NULL, &opts, NULL, false); 2529 CU_ASSERT(rc == 0); 2530 2531 spdk_delay_us(1000); 2532 poll_threads(); 2533 2534 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2535 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2536 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2537 break; 2538 } 2539 } 2540 CU_ASSERT(ctrid != NULL); 2541 2542 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2543 * If we add path2 again, path2 should be inserted between path1 and path3. 2544 * Then, we remove path2. It is not used, and simply removed. 2545 */ 2546 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2547 2548 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2549 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2550 2551 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2552 attach_ctrlr_done, NULL, &opts, NULL, false); 2553 CU_ASSERT(rc == 0); 2554 2555 spdk_delay_us(1000); 2556 poll_threads(); 2557 2558 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2559 2560 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2561 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2562 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2563 2564 ctrid = TAILQ_NEXT(ctrid, link); 2565 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2566 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2567 2568 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2569 CU_ASSERT(rc == 0); 2570 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2571 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2572 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2573 } 2574 2575 /* path1 is currently used and path3 is an alternative path. 2576 * If we remove path1, path is changed to path3. 2577 */ 2578 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2579 CU_ASSERT(rc == 0); 2580 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2581 CU_ASSERT(nvme_ctrlr->resetting == true); 2582 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2583 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2584 } 2585 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2586 2587 poll_threads(); 2588 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2589 poll_threads(); 2590 2591 CU_ASSERT(nvme_ctrlr->resetting == false); 2592 2593 /* path3 is the current and only path. If we remove path3, the corresponding 2594 * nvme_ctrlr is removed. 2595 */ 2596 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2597 CU_ASSERT(rc == 0); 2598 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2599 2600 poll_threads(); 2601 spdk_delay_us(1000); 2602 poll_threads(); 2603 2604 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2605 2606 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2607 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2608 2609 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2610 attach_ctrlr_done, NULL, &opts, NULL, false); 2611 CU_ASSERT(rc == 0); 2612 2613 spdk_delay_us(1000); 2614 poll_threads(); 2615 2616 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2617 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2618 2619 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2620 2621 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2622 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2623 2624 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2625 attach_ctrlr_done, NULL, &opts, NULL, false); 2626 CU_ASSERT(rc == 0); 2627 2628 spdk_delay_us(1000); 2629 poll_threads(); 2630 2631 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2632 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2633 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2634 break; 2635 } 2636 } 2637 CU_ASSERT(ctrid != NULL); 2638 2639 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2640 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2641 CU_ASSERT(rc == 0); 2642 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2643 2644 poll_threads(); 2645 spdk_delay_us(1000); 2646 poll_threads(); 2647 2648 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2649 } 2650 2651 static void 2652 test_abort(void) 2653 { 2654 struct spdk_nvme_transport_id trid = {}; 2655 struct nvme_ctrlr_opts opts = {}; 2656 struct spdk_nvme_ctrlr *ctrlr; 2657 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 2658 struct nvme_ctrlr *nvme_ctrlr; 2659 const int STRING_SIZE = 32; 2660 const char *attached_names[STRING_SIZE]; 2661 struct nvme_bdev *bdev; 2662 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2663 struct spdk_io_channel *ch1, *ch2; 2664 struct nvme_bdev_channel *nbdev_ch1; 2665 struct nvme_io_path *io_path1; 2666 struct nvme_qpair *nvme_qpair1; 2667 int rc; 2668 2669 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2670 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2671 * are submitted on thread 1. Both should succeed. 2672 */ 2673 2674 ut_init_trid(&trid); 2675 2676 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2677 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2678 2679 g_ut_attach_ctrlr_status = 0; 2680 g_ut_attach_bdev_count = 1; 2681 2682 set_thread(1); 2683 2684 opts.ctrlr_loss_timeout_sec = -1; 2685 opts.reconnect_delay_sec = 1; 2686 2687 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2688 attach_ctrlr_done, NULL, &dopts, &opts, false); 2689 CU_ASSERT(rc == 0); 2690 2691 spdk_delay_us(1000); 2692 poll_threads(); 2693 2694 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2695 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2696 2697 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2698 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2699 2700 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2701 ut_bdev_io_set_buf(write_io); 2702 2703 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2704 ut_bdev_io_set_buf(fuse_io); 2705 2706 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2707 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2708 2709 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2710 2711 set_thread(0); 2712 2713 ch1 = spdk_get_io_channel(bdev); 2714 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2715 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2716 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2717 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2718 nvme_qpair1 = io_path1->qpair; 2719 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2720 2721 set_thread(1); 2722 2723 ch2 = spdk_get_io_channel(bdev); 2724 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2725 2726 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2727 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2728 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2729 2730 /* Aborting the already completed request should fail. */ 2731 write_io->internal.in_submit_request = true; 2732 bdev_nvme_submit_request(ch1, write_io); 2733 poll_threads(); 2734 2735 CU_ASSERT(write_io->internal.in_submit_request == false); 2736 2737 abort_io->u.abort.bio_to_abort = write_io; 2738 abort_io->internal.in_submit_request = true; 2739 2740 bdev_nvme_submit_request(ch1, abort_io); 2741 2742 poll_threads(); 2743 2744 CU_ASSERT(abort_io->internal.in_submit_request == false); 2745 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2746 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2747 2748 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2749 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2750 2751 admin_io->internal.in_submit_request = true; 2752 bdev_nvme_submit_request(ch1, admin_io); 2753 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2754 poll_threads(); 2755 2756 CU_ASSERT(admin_io->internal.in_submit_request == false); 2757 2758 abort_io->u.abort.bio_to_abort = admin_io; 2759 abort_io->internal.in_submit_request = true; 2760 2761 bdev_nvme_submit_request(ch2, abort_io); 2762 2763 poll_threads(); 2764 2765 CU_ASSERT(abort_io->internal.in_submit_request == false); 2766 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2767 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2768 2769 /* Aborting the write request should succeed. */ 2770 write_io->internal.in_submit_request = true; 2771 bdev_nvme_submit_request(ch1, write_io); 2772 2773 CU_ASSERT(write_io->internal.in_submit_request == true); 2774 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2775 2776 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2777 abort_io->u.abort.bio_to_abort = write_io; 2778 abort_io->internal.in_submit_request = true; 2779 2780 bdev_nvme_submit_request(ch1, abort_io); 2781 2782 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2783 poll_threads(); 2784 2785 CU_ASSERT(abort_io->internal.in_submit_request == false); 2786 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2787 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2788 CU_ASSERT(write_io->internal.in_submit_request == false); 2789 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2790 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2791 2792 /* Aborting the fuse request should succeed. */ 2793 fuse_io->internal.in_submit_request = true; 2794 bdev_nvme_submit_request(ch1, fuse_io); 2795 2796 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2797 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2798 2799 abort_io->u.abort.bio_to_abort = fuse_io; 2800 abort_io->internal.in_submit_request = true; 2801 2802 bdev_nvme_submit_request(ch1, abort_io); 2803 2804 spdk_delay_us(10000); 2805 poll_threads(); 2806 2807 CU_ASSERT(abort_io->internal.in_submit_request == false); 2808 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2809 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2810 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2811 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2812 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2813 2814 /* Aborting the admin request should succeed. */ 2815 admin_io->internal.in_submit_request = true; 2816 bdev_nvme_submit_request(ch1, admin_io); 2817 2818 CU_ASSERT(admin_io->internal.in_submit_request == true); 2819 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2820 2821 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2822 abort_io->u.abort.bio_to_abort = admin_io; 2823 abort_io->internal.in_submit_request = true; 2824 2825 bdev_nvme_submit_request(ch2, abort_io); 2826 2827 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2828 poll_threads(); 2829 2830 CU_ASSERT(abort_io->internal.in_submit_request == false); 2831 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2832 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2833 CU_ASSERT(admin_io->internal.in_submit_request == false); 2834 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2835 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2836 2837 set_thread(0); 2838 2839 /* If qpair is disconnected, it is freed and then reconnected via resetting 2840 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2841 * while resetting the nvme_ctrlr. 2842 */ 2843 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2844 2845 poll_thread_times(0, 3); 2846 2847 CU_ASSERT(nvme_qpair1->qpair == NULL); 2848 CU_ASSERT(nvme_ctrlr->resetting == true); 2849 2850 write_io->internal.in_submit_request = true; 2851 2852 bdev_nvme_submit_request(ch1, write_io); 2853 2854 CU_ASSERT(write_io->internal.in_submit_request == true); 2855 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2856 2857 /* Aborting the queued write request should succeed immediately. */ 2858 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2859 abort_io->u.abort.bio_to_abort = write_io; 2860 abort_io->internal.in_submit_request = true; 2861 2862 bdev_nvme_submit_request(ch1, abort_io); 2863 2864 CU_ASSERT(abort_io->internal.in_submit_request == false); 2865 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2866 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2867 CU_ASSERT(write_io->internal.in_submit_request == false); 2868 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2869 2870 poll_threads(); 2871 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2872 poll_threads(); 2873 2874 spdk_put_io_channel(ch1); 2875 2876 set_thread(1); 2877 2878 spdk_put_io_channel(ch2); 2879 2880 poll_threads(); 2881 2882 free(write_io); 2883 free(fuse_io); 2884 free(admin_io); 2885 free(abort_io); 2886 2887 set_thread(1); 2888 2889 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2890 CU_ASSERT(rc == 0); 2891 2892 poll_threads(); 2893 spdk_delay_us(1000); 2894 poll_threads(); 2895 2896 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2897 } 2898 2899 static void 2900 test_get_io_qpair(void) 2901 { 2902 struct spdk_nvme_transport_id trid = {}; 2903 struct spdk_nvme_ctrlr ctrlr = {}; 2904 struct nvme_ctrlr *nvme_ctrlr = NULL; 2905 struct spdk_io_channel *ch; 2906 struct nvme_ctrlr_channel *ctrlr_ch; 2907 struct spdk_nvme_qpair *qpair; 2908 int rc; 2909 2910 ut_init_trid(&trid); 2911 TAILQ_INIT(&ctrlr.active_io_qpairs); 2912 2913 set_thread(0); 2914 2915 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2916 CU_ASSERT(rc == 0); 2917 2918 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2919 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2920 2921 ch = spdk_get_io_channel(nvme_ctrlr); 2922 SPDK_CU_ASSERT_FATAL(ch != NULL); 2923 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2924 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2925 2926 qpair = bdev_nvme_get_io_qpair(ch); 2927 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2928 2929 spdk_put_io_channel(ch); 2930 2931 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2932 CU_ASSERT(rc == 0); 2933 2934 poll_threads(); 2935 spdk_delay_us(1000); 2936 poll_threads(); 2937 2938 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2939 } 2940 2941 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2942 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2943 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2944 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2945 */ 2946 static void 2947 test_bdev_unregister(void) 2948 { 2949 struct spdk_nvme_transport_id trid = {}; 2950 struct spdk_nvme_ctrlr *ctrlr; 2951 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2952 struct nvme_ctrlr *nvme_ctrlr; 2953 struct nvme_ns *nvme_ns1, *nvme_ns2; 2954 const int STRING_SIZE = 32; 2955 const char *attached_names[STRING_SIZE]; 2956 struct nvme_bdev *bdev1, *bdev2; 2957 int rc; 2958 2959 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2960 ut_init_trid(&trid); 2961 2962 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2963 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2964 2965 g_ut_attach_ctrlr_status = 0; 2966 g_ut_attach_bdev_count = 2; 2967 2968 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2969 attach_ctrlr_done, NULL, &opts, NULL, false); 2970 CU_ASSERT(rc == 0); 2971 2972 spdk_delay_us(1000); 2973 poll_threads(); 2974 2975 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2976 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2977 2978 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2979 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2980 2981 bdev1 = nvme_ns1->bdev; 2982 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2983 2984 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2985 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2986 2987 bdev2 = nvme_ns2->bdev; 2988 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2989 2990 bdev_nvme_destruct(&bdev1->disk); 2991 bdev_nvme_destruct(&bdev2->disk); 2992 2993 poll_threads(); 2994 2995 CU_ASSERT(nvme_ns1->bdev == NULL); 2996 CU_ASSERT(nvme_ns2->bdev == NULL); 2997 2998 nvme_ctrlr->destruct = true; 2999 _nvme_ctrlr_destruct(nvme_ctrlr); 3000 3001 poll_threads(); 3002 spdk_delay_us(1000); 3003 poll_threads(); 3004 3005 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3006 } 3007 3008 static void 3009 test_compare_ns(void) 3010 { 3011 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 3012 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 3013 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 3014 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3015 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3016 3017 /* No IDs are defined. */ 3018 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3019 3020 /* Only EUI64 are defined and not matched. */ 3021 nsdata1.eui64 = 0xABCDEF0123456789; 3022 nsdata2.eui64 = 0xBBCDEF0123456789; 3023 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3024 3025 /* Only EUI64 are defined and matched. */ 3026 nsdata2.eui64 = 0xABCDEF0123456789; 3027 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3028 3029 /* Only NGUID are defined and not matched. */ 3030 nsdata1.eui64 = 0x0; 3031 nsdata2.eui64 = 0x0; 3032 nsdata1.nguid[0] = 0x12; 3033 nsdata2.nguid[0] = 0x10; 3034 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3035 3036 /* Only NGUID are defined and matched. */ 3037 nsdata2.nguid[0] = 0x12; 3038 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3039 3040 /* Only UUID are defined and not matched. */ 3041 nsdata1.nguid[0] = 0x0; 3042 nsdata2.nguid[0] = 0x0; 3043 ns1.uuid = &uuid1; 3044 ns2.uuid = &uuid2; 3045 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3046 3047 /* Only one UUID is defined. */ 3048 ns1.uuid = NULL; 3049 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3050 3051 /* Only UUID are defined and matched. */ 3052 ns1.uuid = &uuid2; 3053 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3054 3055 /* All EUI64, NGUID, and UUID are defined and matched. */ 3056 nsdata1.eui64 = 0x123456789ABCDEF; 3057 nsdata2.eui64 = 0x123456789ABCDEF; 3058 nsdata1.nguid[15] = 0x34; 3059 nsdata2.nguid[15] = 0x34; 3060 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3061 3062 /* CSI are not matched. */ 3063 ns1.csi = SPDK_NVME_CSI_ZNS; 3064 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3065 } 3066 3067 static void 3068 test_init_ana_log_page(void) 3069 { 3070 struct spdk_nvme_transport_id trid = {}; 3071 struct spdk_nvme_ctrlr *ctrlr; 3072 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3073 struct nvme_ctrlr *nvme_ctrlr; 3074 const int STRING_SIZE = 32; 3075 const char *attached_names[STRING_SIZE]; 3076 int rc; 3077 3078 set_thread(0); 3079 3080 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3081 ut_init_trid(&trid); 3082 3083 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3084 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3085 3086 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3087 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3088 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3089 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3090 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3091 3092 g_ut_attach_ctrlr_status = 0; 3093 g_ut_attach_bdev_count = 5; 3094 3095 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3096 attach_ctrlr_done, NULL, &opts, NULL, false); 3097 CU_ASSERT(rc == 0); 3098 3099 spdk_delay_us(1000); 3100 poll_threads(); 3101 3102 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3103 poll_threads(); 3104 3105 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3106 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3107 3108 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3109 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3110 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3111 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3112 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3113 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3114 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3115 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3116 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3117 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3118 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3119 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3120 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3121 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3122 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3123 3124 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3125 CU_ASSERT(rc == 0); 3126 3127 poll_threads(); 3128 spdk_delay_us(1000); 3129 poll_threads(); 3130 3131 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3132 } 3133 3134 static void 3135 init_accel(void) 3136 { 3137 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3138 sizeof(int), "accel_p"); 3139 } 3140 3141 static void 3142 fini_accel(void) 3143 { 3144 spdk_io_device_unregister(g_accel_p, NULL); 3145 } 3146 3147 static void 3148 test_get_memory_domains(void) 3149 { 3150 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3151 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3152 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3153 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3154 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3155 struct spdk_memory_domain *domains[4] = {}; 3156 int rc = 0; 3157 3158 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3159 3160 /* nvme controller doesn't have memory domains */ 3161 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3162 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3163 CU_ASSERT(rc == 0); 3164 CU_ASSERT(domains[0] == NULL); 3165 CU_ASSERT(domains[1] == NULL); 3166 3167 /* nvme controller has a memory domain */ 3168 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3169 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3170 CU_ASSERT(rc == 1); 3171 CU_ASSERT(domains[0] != NULL); 3172 memset(domains, 0, sizeof(domains)); 3173 3174 /* multipath, 2 controllers report 1 memory domain each */ 3175 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3176 3177 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3178 CU_ASSERT(rc == 2); 3179 CU_ASSERT(domains[0] != NULL); 3180 CU_ASSERT(domains[1] != NULL); 3181 memset(domains, 0, sizeof(domains)); 3182 3183 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3184 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3185 CU_ASSERT(rc == 2); 3186 3187 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3188 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3189 CU_ASSERT(rc == 2); 3190 CU_ASSERT(domains[0] == NULL); 3191 CU_ASSERT(domains[1] == NULL); 3192 3193 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3194 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3195 CU_ASSERT(rc == 2); 3196 CU_ASSERT(domains[0] != NULL); 3197 CU_ASSERT(domains[1] == NULL); 3198 memset(domains, 0, sizeof(domains)); 3199 3200 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3201 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3202 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3203 CU_ASSERT(rc == 4); 3204 CU_ASSERT(domains[0] != NULL); 3205 CU_ASSERT(domains[1] != NULL); 3206 CU_ASSERT(domains[2] != NULL); 3207 CU_ASSERT(domains[3] != NULL); 3208 memset(domains, 0, sizeof(domains)); 3209 3210 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3211 * Array size is less than the number of memory domains */ 3212 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3213 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3214 CU_ASSERT(rc == 4); 3215 CU_ASSERT(domains[0] != NULL); 3216 CU_ASSERT(domains[1] != NULL); 3217 CU_ASSERT(domains[2] != NULL); 3218 CU_ASSERT(domains[3] == NULL); 3219 memset(domains, 0, sizeof(domains)); 3220 3221 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3222 } 3223 3224 static void 3225 test_reconnect_qpair(void) 3226 { 3227 struct spdk_nvme_transport_id trid = {}; 3228 struct spdk_nvme_ctrlr *ctrlr; 3229 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3230 struct nvme_ctrlr *nvme_ctrlr; 3231 const int STRING_SIZE = 32; 3232 const char *attached_names[STRING_SIZE]; 3233 struct nvme_bdev *bdev; 3234 struct spdk_io_channel *ch1, *ch2; 3235 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3236 struct nvme_io_path *io_path1, *io_path2; 3237 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3238 int rc; 3239 3240 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3241 ut_init_trid(&trid); 3242 3243 set_thread(0); 3244 3245 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3246 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3247 3248 g_ut_attach_ctrlr_status = 0; 3249 g_ut_attach_bdev_count = 1; 3250 3251 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3252 attach_ctrlr_done, NULL, &opts, NULL, false); 3253 CU_ASSERT(rc == 0); 3254 3255 spdk_delay_us(1000); 3256 poll_threads(); 3257 3258 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3259 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3260 3261 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3262 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3263 3264 ch1 = spdk_get_io_channel(bdev); 3265 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3266 3267 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3268 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3269 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3270 nvme_qpair1 = io_path1->qpair; 3271 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3272 3273 set_thread(1); 3274 3275 ch2 = spdk_get_io_channel(bdev); 3276 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3277 3278 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3279 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3280 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3281 nvme_qpair2 = io_path2->qpair; 3282 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3283 3284 /* If a qpair is disconnected, it is freed and then reconnected via 3285 * resetting the corresponding nvme_ctrlr. 3286 */ 3287 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3288 ctrlr->is_failed = true; 3289 3290 poll_thread_times(1, 3); 3291 CU_ASSERT(nvme_qpair1->qpair != NULL); 3292 CU_ASSERT(nvme_qpair2->qpair == NULL); 3293 CU_ASSERT(nvme_ctrlr->resetting == true); 3294 3295 poll_thread_times(0, 3); 3296 CU_ASSERT(nvme_qpair1->qpair == NULL); 3297 CU_ASSERT(nvme_qpair2->qpair == NULL); 3298 CU_ASSERT(ctrlr->is_failed == true); 3299 3300 poll_thread_times(1, 2); 3301 poll_thread_times(0, 1); 3302 CU_ASSERT(ctrlr->is_failed == false); 3303 CU_ASSERT(ctrlr->adminq.is_connected == false); 3304 3305 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3306 poll_thread_times(0, 2); 3307 CU_ASSERT(ctrlr->adminq.is_connected == true); 3308 3309 poll_thread_times(0, 1); 3310 poll_thread_times(1, 1); 3311 CU_ASSERT(nvme_qpair1->qpair != NULL); 3312 CU_ASSERT(nvme_qpair2->qpair != NULL); 3313 CU_ASSERT(nvme_ctrlr->resetting == true); 3314 3315 poll_thread_times(0, 2); 3316 poll_thread_times(1, 1); 3317 poll_thread_times(0, 1); 3318 CU_ASSERT(nvme_ctrlr->resetting == false); 3319 3320 poll_threads(); 3321 3322 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3323 * fails, the qpair is just freed. 3324 */ 3325 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3326 ctrlr->is_failed = true; 3327 ctrlr->fail_reset = true; 3328 3329 poll_thread_times(1, 3); 3330 CU_ASSERT(nvme_qpair1->qpair != NULL); 3331 CU_ASSERT(nvme_qpair2->qpair == NULL); 3332 CU_ASSERT(nvme_ctrlr->resetting == true); 3333 3334 poll_thread_times(0, 3); 3335 poll_thread_times(1, 1); 3336 CU_ASSERT(nvme_qpair1->qpair == NULL); 3337 CU_ASSERT(nvme_qpair2->qpair == NULL); 3338 CU_ASSERT(ctrlr->is_failed == true); 3339 3340 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3341 poll_thread_times(0, 3); 3342 poll_thread_times(1, 1); 3343 poll_thread_times(0, 1); 3344 CU_ASSERT(ctrlr->is_failed == true); 3345 CU_ASSERT(nvme_ctrlr->resetting == false); 3346 CU_ASSERT(nvme_qpair1->qpair == NULL); 3347 CU_ASSERT(nvme_qpair2->qpair == NULL); 3348 3349 poll_threads(); 3350 3351 spdk_put_io_channel(ch2); 3352 3353 set_thread(0); 3354 3355 spdk_put_io_channel(ch1); 3356 3357 poll_threads(); 3358 3359 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3360 CU_ASSERT(rc == 0); 3361 3362 poll_threads(); 3363 spdk_delay_us(1000); 3364 poll_threads(); 3365 3366 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3367 } 3368 3369 static void 3370 test_create_bdev_ctrlr(void) 3371 { 3372 struct nvme_path_id path1 = {}, path2 = {}; 3373 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3374 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3375 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3376 const int STRING_SIZE = 32; 3377 const char *attached_names[STRING_SIZE]; 3378 int rc; 3379 3380 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3381 ut_init_trid(&path1.trid); 3382 ut_init_trid2(&path2.trid); 3383 3384 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3385 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3386 3387 g_ut_attach_ctrlr_status = 0; 3388 g_ut_attach_bdev_count = 0; 3389 3390 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3391 attach_ctrlr_done, NULL, &opts, NULL, true); 3392 CU_ASSERT(rc == 0); 3393 3394 spdk_delay_us(1000); 3395 poll_threads(); 3396 3397 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3398 poll_threads(); 3399 3400 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3401 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3402 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3403 3404 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3405 g_ut_attach_ctrlr_status = -EINVAL; 3406 3407 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3408 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3409 3410 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3411 3412 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3413 attach_ctrlr_done, NULL, &opts, NULL, true); 3414 CU_ASSERT(rc == 0); 3415 3416 spdk_delay_us(1000); 3417 poll_threads(); 3418 3419 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3420 poll_threads(); 3421 3422 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3423 3424 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3425 g_ut_attach_ctrlr_status = 0; 3426 3427 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3428 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3429 3430 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3431 attach_ctrlr_done, NULL, &opts, NULL, true); 3432 CU_ASSERT(rc == 0); 3433 3434 spdk_delay_us(1000); 3435 poll_threads(); 3436 3437 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3438 poll_threads(); 3439 3440 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3441 3442 /* Delete two ctrlrs at once. */ 3443 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3444 CU_ASSERT(rc == 0); 3445 3446 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3447 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3448 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3449 3450 poll_threads(); 3451 spdk_delay_us(1000); 3452 poll_threads(); 3453 3454 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3455 3456 /* Add two ctrlrs and delete one by one. */ 3457 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3458 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3459 3460 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3461 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3462 3463 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3464 attach_ctrlr_done, NULL, &opts, NULL, true); 3465 CU_ASSERT(rc == 0); 3466 3467 spdk_delay_us(1000); 3468 poll_threads(); 3469 3470 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3471 poll_threads(); 3472 3473 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3474 attach_ctrlr_done, NULL, &opts, NULL, true); 3475 CU_ASSERT(rc == 0); 3476 3477 spdk_delay_us(1000); 3478 poll_threads(); 3479 3480 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3481 poll_threads(); 3482 3483 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3484 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3485 3486 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3487 CU_ASSERT(rc == 0); 3488 3489 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3490 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3491 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3492 3493 poll_threads(); 3494 spdk_delay_us(1000); 3495 poll_threads(); 3496 3497 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3498 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3499 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3500 3501 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3502 CU_ASSERT(rc == 0); 3503 3504 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3505 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3506 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3507 3508 poll_threads(); 3509 spdk_delay_us(1000); 3510 poll_threads(); 3511 3512 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3513 } 3514 3515 static struct nvme_ns * 3516 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3517 { 3518 struct nvme_ns *nvme_ns; 3519 3520 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3521 if (nvme_ns->ctrlr == nvme_ctrlr) { 3522 return nvme_ns; 3523 } 3524 } 3525 3526 return NULL; 3527 } 3528 3529 static void 3530 test_add_multi_ns_to_bdev(void) 3531 { 3532 struct nvme_path_id path1 = {}, path2 = {}; 3533 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3534 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3535 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3536 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3537 struct nvme_ns *nvme_ns1, *nvme_ns2; 3538 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3539 const int STRING_SIZE = 32; 3540 const char *attached_names[STRING_SIZE]; 3541 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3542 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3543 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3544 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3545 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3546 int rc; 3547 3548 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3549 ut_init_trid(&path1.trid); 3550 ut_init_trid2(&path2.trid); 3551 3552 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3553 3554 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3555 * namespaces are populated. 3556 */ 3557 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3558 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3559 3560 ctrlr1->ns[1].is_active = false; 3561 ctrlr1->ns[4].is_active = false; 3562 ctrlr1->ns[0].uuid = &uuid1; 3563 ctrlr1->ns[2].uuid = &uuid3; 3564 ctrlr1->ns[3].uuid = &uuid4; 3565 3566 g_ut_attach_ctrlr_status = 0; 3567 g_ut_attach_bdev_count = 3; 3568 3569 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3570 attach_ctrlr_done, NULL, &opts, NULL, true); 3571 CU_ASSERT(rc == 0); 3572 3573 spdk_delay_us(1000); 3574 poll_threads(); 3575 3576 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3577 poll_threads(); 3578 3579 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3580 * namespaces are populated. The uuid of 4th namespace is different, and hence 3581 * adding 4th namespace to a bdev should fail. 3582 */ 3583 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3584 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3585 3586 ctrlr2->ns[2].is_active = false; 3587 ctrlr2->ns[4].is_active = false; 3588 ctrlr2->ns[0].uuid = &uuid1; 3589 ctrlr2->ns[1].uuid = &uuid2; 3590 ctrlr2->ns[3].uuid = &uuid44; 3591 3592 g_ut_attach_ctrlr_status = 0; 3593 g_ut_attach_bdev_count = 2; 3594 3595 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3596 attach_ctrlr_done, NULL, &opts, NULL, true); 3597 CU_ASSERT(rc == 0); 3598 3599 spdk_delay_us(1000); 3600 poll_threads(); 3601 3602 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3603 poll_threads(); 3604 3605 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3606 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3607 3608 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3609 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3610 3611 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3612 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3613 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3614 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3615 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3616 3617 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3618 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3619 3620 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3621 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3622 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3623 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3624 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3625 3626 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3627 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3628 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3629 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3630 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3631 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3632 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3633 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3634 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3635 3636 CU_ASSERT(bdev1->ref == 2); 3637 CU_ASSERT(bdev2->ref == 1); 3638 CU_ASSERT(bdev3->ref == 1); 3639 CU_ASSERT(bdev4->ref == 1); 3640 3641 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3642 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3643 CU_ASSERT(rc == 0); 3644 3645 poll_threads(); 3646 spdk_delay_us(1000); 3647 poll_threads(); 3648 3649 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3650 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3651 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2); 3652 3653 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3654 CU_ASSERT(rc == 0); 3655 3656 poll_threads(); 3657 spdk_delay_us(1000); 3658 poll_threads(); 3659 3660 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3661 3662 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3663 * can be deleted when the bdev subsystem shutdown. 3664 */ 3665 g_ut_attach_bdev_count = 1; 3666 3667 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3668 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3669 3670 ctrlr1->ns[0].uuid = &uuid1; 3671 3672 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3673 attach_ctrlr_done, NULL, &opts, NULL, true); 3674 CU_ASSERT(rc == 0); 3675 3676 spdk_delay_us(1000); 3677 poll_threads(); 3678 3679 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3680 poll_threads(); 3681 3682 ut_init_trid2(&path2.trid); 3683 3684 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3685 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3686 3687 ctrlr2->ns[0].uuid = &uuid1; 3688 3689 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3690 attach_ctrlr_done, NULL, &opts, NULL, true); 3691 CU_ASSERT(rc == 0); 3692 3693 spdk_delay_us(1000); 3694 poll_threads(); 3695 3696 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3697 poll_threads(); 3698 3699 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3700 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3701 3702 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3703 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3704 3705 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3706 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3707 3708 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3709 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3710 3711 /* Check if a nvme_bdev has two nvme_ns. */ 3712 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3713 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3714 CU_ASSERT(nvme_ns1->bdev == bdev1); 3715 3716 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3717 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3718 CU_ASSERT(nvme_ns2->bdev == bdev1); 3719 3720 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3721 bdev_nvme_destruct(&bdev1->disk); 3722 3723 poll_threads(); 3724 3725 CU_ASSERT(nvme_ns1->bdev == NULL); 3726 CU_ASSERT(nvme_ns2->bdev == NULL); 3727 3728 nvme_ctrlr1->destruct = true; 3729 _nvme_ctrlr_destruct(nvme_ctrlr1); 3730 3731 poll_threads(); 3732 spdk_delay_us(1000); 3733 poll_threads(); 3734 3735 nvme_ctrlr2->destruct = true; 3736 _nvme_ctrlr_destruct(nvme_ctrlr2); 3737 3738 poll_threads(); 3739 spdk_delay_us(1000); 3740 poll_threads(); 3741 3742 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3743 } 3744 3745 static void 3746 test_add_multi_io_paths_to_nbdev_ch(void) 3747 { 3748 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3749 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3750 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3751 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3752 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3753 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3754 const int STRING_SIZE = 32; 3755 const char *attached_names[STRING_SIZE]; 3756 struct nvme_bdev *bdev; 3757 struct spdk_io_channel *ch; 3758 struct nvme_bdev_channel *nbdev_ch; 3759 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3760 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3761 int rc; 3762 3763 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3764 ut_init_trid(&path1.trid); 3765 ut_init_trid2(&path2.trid); 3766 ut_init_trid3(&path3.trid); 3767 g_ut_attach_ctrlr_status = 0; 3768 g_ut_attach_bdev_count = 1; 3769 3770 set_thread(1); 3771 3772 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3773 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3774 3775 ctrlr1->ns[0].uuid = &uuid1; 3776 3777 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3778 attach_ctrlr_done, NULL, &opts, NULL, true); 3779 CU_ASSERT(rc == 0); 3780 3781 spdk_delay_us(1000); 3782 poll_threads(); 3783 3784 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3785 poll_threads(); 3786 3787 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3788 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3789 3790 ctrlr2->ns[0].uuid = &uuid1; 3791 3792 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3793 attach_ctrlr_done, NULL, &opts, NULL, true); 3794 CU_ASSERT(rc == 0); 3795 3796 spdk_delay_us(1000); 3797 poll_threads(); 3798 3799 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3800 poll_threads(); 3801 3802 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3803 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3804 3805 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3806 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3807 3808 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3809 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3810 3811 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3812 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3813 3814 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3815 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3816 3817 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3818 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3819 3820 set_thread(0); 3821 3822 ch = spdk_get_io_channel(bdev); 3823 SPDK_CU_ASSERT_FATAL(ch != NULL); 3824 nbdev_ch = spdk_io_channel_get_ctx(ch); 3825 3826 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3827 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3828 3829 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3830 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3831 3832 set_thread(1); 3833 3834 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3835 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3836 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3837 3838 ctrlr3->ns[0].uuid = &uuid1; 3839 3840 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3841 attach_ctrlr_done, NULL, &opts, NULL, true); 3842 CU_ASSERT(rc == 0); 3843 3844 spdk_delay_us(1000); 3845 poll_threads(); 3846 3847 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3848 poll_threads(); 3849 3850 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn); 3851 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3852 3853 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3854 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3855 3856 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3857 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3858 3859 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3860 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3861 CU_ASSERT(rc == 0); 3862 3863 poll_threads(); 3864 spdk_delay_us(1000); 3865 poll_threads(); 3866 3867 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1); 3868 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3869 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3); 3870 3871 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3872 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3873 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3874 3875 set_thread(0); 3876 3877 spdk_put_io_channel(ch); 3878 3879 poll_threads(); 3880 3881 set_thread(1); 3882 3883 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3884 CU_ASSERT(rc == 0); 3885 3886 poll_threads(); 3887 spdk_delay_us(1000); 3888 poll_threads(); 3889 3890 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3891 } 3892 3893 static void 3894 test_admin_path(void) 3895 { 3896 struct nvme_path_id path1 = {}, path2 = {}; 3897 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3898 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3899 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3900 const int STRING_SIZE = 32; 3901 const char *attached_names[STRING_SIZE]; 3902 struct nvme_bdev *bdev; 3903 struct spdk_io_channel *ch; 3904 struct spdk_bdev_io *bdev_io; 3905 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3906 int rc; 3907 3908 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3909 ut_init_trid(&path1.trid); 3910 ut_init_trid2(&path2.trid); 3911 g_ut_attach_ctrlr_status = 0; 3912 g_ut_attach_bdev_count = 1; 3913 3914 set_thread(0); 3915 3916 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3917 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3918 3919 ctrlr1->ns[0].uuid = &uuid1; 3920 3921 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3922 attach_ctrlr_done, NULL, &opts, NULL, true); 3923 CU_ASSERT(rc == 0); 3924 3925 spdk_delay_us(1000); 3926 poll_threads(); 3927 3928 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3929 poll_threads(); 3930 3931 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3932 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3933 3934 ctrlr2->ns[0].uuid = &uuid1; 3935 3936 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3937 attach_ctrlr_done, NULL, &opts, NULL, true); 3938 CU_ASSERT(rc == 0); 3939 3940 spdk_delay_us(1000); 3941 poll_threads(); 3942 3943 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3944 poll_threads(); 3945 3946 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3947 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3948 3949 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3950 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3951 3952 ch = spdk_get_io_channel(bdev); 3953 SPDK_CU_ASSERT_FATAL(ch != NULL); 3954 3955 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3956 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3957 3958 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3959 * submitted to ctrlr2. 3960 */ 3961 ctrlr1->is_failed = true; 3962 bdev_io->internal.in_submit_request = true; 3963 3964 bdev_nvme_submit_request(ch, bdev_io); 3965 3966 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3967 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3968 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3969 3970 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3971 poll_threads(); 3972 3973 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3974 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3975 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3976 3977 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3978 ctrlr2->is_failed = true; 3979 bdev_io->internal.in_submit_request = true; 3980 3981 bdev_nvme_submit_request(ch, bdev_io); 3982 3983 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3984 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3985 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3986 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3987 3988 free(bdev_io); 3989 3990 spdk_put_io_channel(ch); 3991 3992 poll_threads(); 3993 3994 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3995 CU_ASSERT(rc == 0); 3996 3997 poll_threads(); 3998 spdk_delay_us(1000); 3999 poll_threads(); 4000 4001 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4002 } 4003 4004 static struct nvme_io_path * 4005 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 4006 struct nvme_ctrlr *nvme_ctrlr) 4007 { 4008 struct nvme_io_path *io_path; 4009 4010 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 4011 if (io_path->qpair->ctrlr == nvme_ctrlr) { 4012 return io_path; 4013 } 4014 } 4015 4016 return NULL; 4017 } 4018 4019 static void 4020 test_reset_bdev_ctrlr(void) 4021 { 4022 struct nvme_path_id path1 = {}, path2 = {}; 4023 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4024 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4025 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4026 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4027 struct nvme_path_id *curr_path1, *curr_path2; 4028 const int STRING_SIZE = 32; 4029 const char *attached_names[STRING_SIZE]; 4030 struct nvme_bdev *bdev; 4031 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 4032 struct nvme_bdev_io *first_bio; 4033 struct spdk_io_channel *ch1, *ch2; 4034 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 4035 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 4036 int rc; 4037 4038 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4039 ut_init_trid(&path1.trid); 4040 ut_init_trid2(&path2.trid); 4041 g_ut_attach_ctrlr_status = 0; 4042 g_ut_attach_bdev_count = 1; 4043 4044 set_thread(0); 4045 4046 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4047 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4048 4049 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4050 attach_ctrlr_done, NULL, &opts, NULL, true); 4051 CU_ASSERT(rc == 0); 4052 4053 spdk_delay_us(1000); 4054 poll_threads(); 4055 4056 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4057 poll_threads(); 4058 4059 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4060 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4061 4062 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4063 attach_ctrlr_done, NULL, &opts, NULL, true); 4064 CU_ASSERT(rc == 0); 4065 4066 spdk_delay_us(1000); 4067 poll_threads(); 4068 4069 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4070 poll_threads(); 4071 4072 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4073 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4074 4075 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4076 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4077 4078 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4079 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4080 4081 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4082 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4083 4084 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4085 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4086 4087 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4088 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4089 4090 set_thread(0); 4091 4092 ch1 = spdk_get_io_channel(bdev); 4093 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4094 4095 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4096 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4097 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4098 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4099 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4100 4101 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4102 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4103 4104 set_thread(1); 4105 4106 ch2 = spdk_get_io_channel(bdev); 4107 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4108 4109 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4110 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4111 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4112 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4113 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4114 4115 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4116 4117 /* The first reset request from bdev_io is submitted on thread 0. 4118 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4119 * 4120 * A few extra polls are necessary after resetting ctrlr1 to check 4121 * pending reset requests for ctrlr1. 4122 */ 4123 ctrlr1->is_failed = true; 4124 curr_path1->last_failed_tsc = spdk_get_ticks(); 4125 ctrlr2->is_failed = true; 4126 curr_path2->last_failed_tsc = spdk_get_ticks(); 4127 4128 set_thread(0); 4129 4130 bdev_nvme_submit_request(ch1, first_bdev_io); 4131 CU_ASSERT(first_bio->io_path == io_path11); 4132 CU_ASSERT(nvme_ctrlr1->resetting == true); 4133 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4134 4135 poll_thread_times(0, 3); 4136 CU_ASSERT(io_path11->qpair->qpair == NULL); 4137 CU_ASSERT(io_path21->qpair->qpair != NULL); 4138 4139 poll_thread_times(1, 2); 4140 CU_ASSERT(io_path11->qpair->qpair == NULL); 4141 CU_ASSERT(io_path21->qpair->qpair == NULL); 4142 CU_ASSERT(ctrlr1->is_failed == true); 4143 4144 poll_thread_times(0, 1); 4145 CU_ASSERT(nvme_ctrlr1->resetting == true); 4146 CU_ASSERT(ctrlr1->is_failed == false); 4147 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4148 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4149 4150 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4151 poll_thread_times(0, 2); 4152 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4153 4154 poll_thread_times(0, 1); 4155 CU_ASSERT(io_path11->qpair->qpair != NULL); 4156 CU_ASSERT(io_path21->qpair->qpair == NULL); 4157 4158 poll_thread_times(1, 1); 4159 CU_ASSERT(io_path11->qpair->qpair != NULL); 4160 CU_ASSERT(io_path21->qpair->qpair != NULL); 4161 4162 poll_thread_times(0, 2); 4163 CU_ASSERT(nvme_ctrlr1->resetting == true); 4164 poll_thread_times(1, 1); 4165 CU_ASSERT(nvme_ctrlr1->resetting == true); 4166 poll_thread_times(0, 2); 4167 CU_ASSERT(nvme_ctrlr1->resetting == false); 4168 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4169 CU_ASSERT(first_bio->io_path == io_path12); 4170 CU_ASSERT(nvme_ctrlr2->resetting == true); 4171 4172 poll_thread_times(0, 3); 4173 CU_ASSERT(io_path12->qpair->qpair == NULL); 4174 CU_ASSERT(io_path22->qpair->qpair != NULL); 4175 4176 poll_thread_times(1, 2); 4177 CU_ASSERT(io_path12->qpair->qpair == NULL); 4178 CU_ASSERT(io_path22->qpair->qpair == NULL); 4179 CU_ASSERT(ctrlr2->is_failed == true); 4180 4181 poll_thread_times(0, 1); 4182 CU_ASSERT(nvme_ctrlr2->resetting == true); 4183 CU_ASSERT(ctrlr2->is_failed == false); 4184 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4185 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4186 4187 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4188 poll_thread_times(0, 2); 4189 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4190 4191 poll_thread_times(0, 1); 4192 CU_ASSERT(io_path12->qpair->qpair != NULL); 4193 CU_ASSERT(io_path22->qpair->qpair == NULL); 4194 4195 poll_thread_times(1, 2); 4196 CU_ASSERT(io_path12->qpair->qpair != NULL); 4197 CU_ASSERT(io_path22->qpair->qpair != NULL); 4198 4199 poll_thread_times(0, 2); 4200 CU_ASSERT(nvme_ctrlr2->resetting == true); 4201 poll_thread_times(1, 1); 4202 CU_ASSERT(nvme_ctrlr2->resetting == true); 4203 poll_thread_times(0, 2); 4204 CU_ASSERT(first_bio->io_path == NULL); 4205 CU_ASSERT(nvme_ctrlr2->resetting == false); 4206 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4207 4208 poll_threads(); 4209 4210 /* There is a race between two reset requests from bdev_io. 4211 * 4212 * The first reset request is submitted on thread 0, and the second reset 4213 * request is submitted on thread 1 while the first is resetting ctrlr1. 4214 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4215 * both reset requests go to ctrlr2. The first comes earlier than the second. 4216 * The second is pending on ctrlr2 again. After the first completes resetting 4217 * ctrl2, both complete successfully. 4218 */ 4219 ctrlr1->is_failed = true; 4220 curr_path1->last_failed_tsc = spdk_get_ticks(); 4221 ctrlr2->is_failed = true; 4222 curr_path2->last_failed_tsc = spdk_get_ticks(); 4223 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4224 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4225 4226 set_thread(0); 4227 4228 bdev_nvme_submit_request(ch1, first_bdev_io); 4229 4230 set_thread(1); 4231 4232 bdev_nvme_submit_request(ch2, second_bdev_io); 4233 4234 CU_ASSERT(nvme_ctrlr1->resetting == true); 4235 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4236 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io); 4237 4238 poll_threads(); 4239 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4240 poll_threads(); 4241 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4242 poll_threads(); 4243 4244 CU_ASSERT(ctrlr1->is_failed == false); 4245 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4246 CU_ASSERT(ctrlr2->is_failed == false); 4247 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4248 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4249 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4250 4251 set_thread(0); 4252 4253 spdk_put_io_channel(ch1); 4254 4255 set_thread(1); 4256 4257 spdk_put_io_channel(ch2); 4258 4259 poll_threads(); 4260 4261 set_thread(0); 4262 4263 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4264 CU_ASSERT(rc == 0); 4265 4266 poll_threads(); 4267 spdk_delay_us(1000); 4268 poll_threads(); 4269 4270 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4271 4272 free(first_bdev_io); 4273 free(second_bdev_io); 4274 } 4275 4276 static void 4277 test_find_io_path(void) 4278 { 4279 struct nvme_bdev_channel nbdev_ch = { 4280 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4281 }; 4282 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4283 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4284 struct spdk_nvme_ns ns1 = {}, ns2 = {}; 4285 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4286 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4287 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4288 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4289 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }; 4290 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4291 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4292 4293 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4294 4295 /* Test if io_path whose ANA state is not accessible is excluded. */ 4296 4297 nvme_qpair1.qpair = &qpair1; 4298 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4299 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4300 4301 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4302 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4303 4304 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4305 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4306 4307 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4308 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4309 4310 nbdev_ch.current_io_path = NULL; 4311 4312 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4313 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4314 4315 nbdev_ch.current_io_path = NULL; 4316 4317 /* Test if io_path whose qpair is resetting is excluded. */ 4318 4319 nvme_qpair1.qpair = NULL; 4320 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4321 4322 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4323 4324 /* Test if ANA optimized state or the first found ANA non-optimized state 4325 * is prioritized. 4326 */ 4327 4328 nvme_qpair1.qpair = &qpair1; 4329 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4330 nvme_qpair2.qpair = &qpair2; 4331 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4332 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4333 4334 nbdev_ch.current_io_path = NULL; 4335 4336 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4337 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4338 4339 nbdev_ch.current_io_path = NULL; 4340 } 4341 4342 static void 4343 test_retry_io_if_ana_state_is_updating(void) 4344 { 4345 struct nvme_path_id path = {}; 4346 struct nvme_ctrlr_opts opts = {}; 4347 struct spdk_nvme_ctrlr *ctrlr; 4348 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 4349 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4350 struct nvme_ctrlr *nvme_ctrlr; 4351 const int STRING_SIZE = 32; 4352 const char *attached_names[STRING_SIZE]; 4353 struct nvme_bdev *bdev; 4354 struct nvme_ns *nvme_ns; 4355 struct spdk_bdev_io *bdev_io1; 4356 struct spdk_io_channel *ch; 4357 struct nvme_bdev_channel *nbdev_ch; 4358 struct nvme_io_path *io_path; 4359 struct nvme_qpair *nvme_qpair; 4360 int rc; 4361 4362 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4363 ut_init_trid(&path.trid); 4364 4365 set_thread(0); 4366 4367 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4368 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4369 4370 g_ut_attach_ctrlr_status = 0; 4371 g_ut_attach_bdev_count = 1; 4372 4373 opts.ctrlr_loss_timeout_sec = -1; 4374 opts.reconnect_delay_sec = 1; 4375 4376 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4377 attach_ctrlr_done, NULL, &dopts, &opts, false); 4378 CU_ASSERT(rc == 0); 4379 4380 spdk_delay_us(1000); 4381 poll_threads(); 4382 4383 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4384 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4385 4386 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 4387 CU_ASSERT(nvme_ctrlr != NULL); 4388 4389 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4390 CU_ASSERT(bdev != NULL); 4391 4392 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4393 CU_ASSERT(nvme_ns != NULL); 4394 4395 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4396 ut_bdev_io_set_buf(bdev_io1); 4397 4398 ch = spdk_get_io_channel(bdev); 4399 SPDK_CU_ASSERT_FATAL(ch != NULL); 4400 4401 nbdev_ch = spdk_io_channel_get_ctx(ch); 4402 4403 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4404 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4405 4406 nvme_qpair = io_path->qpair; 4407 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4408 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4409 4410 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4411 4412 /* If qpair is connected, I/O should succeed. */ 4413 bdev_io1->internal.in_submit_request = true; 4414 4415 bdev_nvme_submit_request(ch, bdev_io1); 4416 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4417 4418 poll_threads(); 4419 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4420 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4421 4422 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4423 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4424 nbdev_ch->current_io_path = NULL; 4425 4426 bdev_io1->internal.in_submit_request = true; 4427 4428 bdev_nvme_submit_request(ch, bdev_io1); 4429 4430 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4431 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4432 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4433 4434 /* ANA state became accessible while I/O was queued. */ 4435 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4436 4437 spdk_delay_us(1000000); 4438 4439 poll_thread_times(0, 1); 4440 4441 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4442 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4443 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4444 4445 poll_threads(); 4446 4447 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4448 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4449 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4450 4451 free(bdev_io1); 4452 4453 spdk_put_io_channel(ch); 4454 4455 poll_threads(); 4456 4457 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4458 CU_ASSERT(rc == 0); 4459 4460 poll_threads(); 4461 spdk_delay_us(1000); 4462 poll_threads(); 4463 4464 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4465 } 4466 4467 static void 4468 test_retry_io_for_io_path_error(void) 4469 { 4470 struct nvme_path_id path1 = {}, path2 = {}; 4471 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4472 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4473 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4474 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4475 const int STRING_SIZE = 32; 4476 const char *attached_names[STRING_SIZE]; 4477 struct nvme_bdev *bdev; 4478 struct nvme_ns *nvme_ns1, *nvme_ns2; 4479 struct spdk_bdev_io *bdev_io; 4480 struct nvme_bdev_io *bio; 4481 struct spdk_io_channel *ch; 4482 struct nvme_bdev_channel *nbdev_ch; 4483 struct nvme_io_path *io_path1, *io_path2; 4484 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4485 struct ut_nvme_req *req; 4486 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4487 int rc; 4488 4489 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4490 ut_init_trid(&path1.trid); 4491 ut_init_trid2(&path2.trid); 4492 4493 g_opts.bdev_retry_count = 1; 4494 4495 set_thread(0); 4496 4497 g_ut_attach_ctrlr_status = 0; 4498 g_ut_attach_bdev_count = 1; 4499 4500 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4501 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4502 4503 ctrlr1->ns[0].uuid = &uuid1; 4504 4505 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4506 attach_ctrlr_done, NULL, &opts, NULL, true); 4507 CU_ASSERT(rc == 0); 4508 4509 spdk_delay_us(1000); 4510 poll_threads(); 4511 4512 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4513 poll_threads(); 4514 4515 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4516 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4517 4518 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4519 CU_ASSERT(nvme_ctrlr1 != NULL); 4520 4521 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4522 CU_ASSERT(bdev != NULL); 4523 4524 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4525 CU_ASSERT(nvme_ns1 != NULL); 4526 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4527 4528 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4529 ut_bdev_io_set_buf(bdev_io); 4530 4531 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4532 4533 ch = spdk_get_io_channel(bdev); 4534 SPDK_CU_ASSERT_FATAL(ch != NULL); 4535 4536 nbdev_ch = spdk_io_channel_get_ctx(ch); 4537 4538 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4539 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4540 4541 nvme_qpair1 = io_path1->qpair; 4542 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4543 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4544 4545 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4546 4547 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4548 bdev_io->internal.in_submit_request = true; 4549 4550 bdev_nvme_submit_request(ch, bdev_io); 4551 4552 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4553 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4554 4555 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4556 SPDK_CU_ASSERT_FATAL(req != NULL); 4557 4558 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4559 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4560 req->cpl.status.dnr = 1; 4561 4562 poll_thread_times(0, 1); 4563 4564 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4565 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4566 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4567 4568 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4569 bdev_io->internal.in_submit_request = true; 4570 4571 bdev_nvme_submit_request(ch, bdev_io); 4572 4573 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4574 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4575 4576 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4577 SPDK_CU_ASSERT_FATAL(req != NULL); 4578 4579 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4580 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4581 4582 poll_thread_times(0, 1); 4583 4584 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4585 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4586 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4587 4588 poll_threads(); 4589 4590 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4591 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4592 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4593 4594 /* Add io_path2 dynamically, and create a multipath configuration. */ 4595 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4596 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4597 4598 ctrlr2->ns[0].uuid = &uuid1; 4599 4600 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4601 attach_ctrlr_done, NULL, &opts, NULL, true); 4602 CU_ASSERT(rc == 0); 4603 4604 spdk_delay_us(1000); 4605 poll_threads(); 4606 4607 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4608 poll_threads(); 4609 4610 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4611 CU_ASSERT(nvme_ctrlr2 != NULL); 4612 4613 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4614 CU_ASSERT(nvme_ns2 != NULL); 4615 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4616 4617 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4618 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4619 4620 nvme_qpair2 = io_path2->qpair; 4621 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4622 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4623 4624 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4625 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4626 * So after a retry, I/O is submitted to io_path2 and should succeed. 4627 */ 4628 bdev_io->internal.in_submit_request = true; 4629 4630 bdev_nvme_submit_request(ch, bdev_io); 4631 4632 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4633 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4634 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4635 4636 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4637 SPDK_CU_ASSERT_FATAL(req != NULL); 4638 4639 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4640 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4641 4642 poll_thread_times(0, 1); 4643 4644 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4645 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4646 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4647 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4648 4649 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4650 nvme_qpair1->qpair = NULL; 4651 4652 poll_threads(); 4653 4654 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4655 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4656 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4657 4658 free(bdev_io); 4659 4660 spdk_put_io_channel(ch); 4661 4662 poll_threads(); 4663 4664 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4665 CU_ASSERT(rc == 0); 4666 4667 poll_threads(); 4668 spdk_delay_us(1000); 4669 poll_threads(); 4670 4671 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4672 4673 g_opts.bdev_retry_count = 0; 4674 } 4675 4676 static void 4677 test_retry_io_count(void) 4678 { 4679 struct nvme_path_id path = {}; 4680 struct spdk_nvme_ctrlr *ctrlr; 4681 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4682 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4683 struct nvme_ctrlr *nvme_ctrlr; 4684 const int STRING_SIZE = 32; 4685 const char *attached_names[STRING_SIZE]; 4686 struct nvme_bdev *bdev; 4687 struct nvme_ns *nvme_ns; 4688 struct spdk_bdev_io *bdev_io; 4689 struct nvme_bdev_io *bio; 4690 struct spdk_io_channel *ch; 4691 struct nvme_bdev_channel *nbdev_ch; 4692 struct nvme_io_path *io_path; 4693 struct nvme_qpair *nvme_qpair; 4694 struct ut_nvme_req *req; 4695 int rc; 4696 4697 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4698 ut_init_trid(&path.trid); 4699 4700 set_thread(0); 4701 4702 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4703 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4704 4705 g_ut_attach_ctrlr_status = 0; 4706 g_ut_attach_bdev_count = 1; 4707 4708 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4709 attach_ctrlr_done, NULL, &opts, NULL, false); 4710 CU_ASSERT(rc == 0); 4711 4712 spdk_delay_us(1000); 4713 poll_threads(); 4714 4715 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4716 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4717 4718 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 4719 CU_ASSERT(nvme_ctrlr != NULL); 4720 4721 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4722 CU_ASSERT(bdev != NULL); 4723 4724 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4725 CU_ASSERT(nvme_ns != NULL); 4726 4727 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4728 ut_bdev_io_set_buf(bdev_io); 4729 4730 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4731 4732 ch = spdk_get_io_channel(bdev); 4733 SPDK_CU_ASSERT_FATAL(ch != NULL); 4734 4735 nbdev_ch = spdk_io_channel_get_ctx(ch); 4736 4737 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4738 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4739 4740 nvme_qpair = io_path->qpair; 4741 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4742 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4743 4744 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4745 4746 /* If I/O is aborted by request, it should not be retried. */ 4747 g_opts.bdev_retry_count = 1; 4748 4749 bdev_io->internal.in_submit_request = true; 4750 4751 bdev_nvme_submit_request(ch, bdev_io); 4752 4753 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4754 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4755 4756 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4757 SPDK_CU_ASSERT_FATAL(req != NULL); 4758 4759 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4760 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4761 4762 poll_thread_times(0, 1); 4763 4764 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4765 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4766 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4767 4768 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4769 * the failed I/O should not be retried. 4770 */ 4771 g_opts.bdev_retry_count = 4; 4772 4773 bdev_io->internal.in_submit_request = true; 4774 4775 bdev_nvme_submit_request(ch, bdev_io); 4776 4777 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4778 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4779 4780 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4781 SPDK_CU_ASSERT_FATAL(req != NULL); 4782 4783 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4784 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4785 bio->retry_count = 4; 4786 4787 poll_thread_times(0, 1); 4788 4789 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4790 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4791 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4792 4793 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4794 g_opts.bdev_retry_count = -1; 4795 4796 bdev_io->internal.in_submit_request = true; 4797 4798 bdev_nvme_submit_request(ch, bdev_io); 4799 4800 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4801 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4802 4803 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4804 SPDK_CU_ASSERT_FATAL(req != NULL); 4805 4806 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4807 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4808 bio->retry_count = 4; 4809 4810 poll_thread_times(0, 1); 4811 4812 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4813 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4814 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4815 4816 poll_threads(); 4817 4818 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4819 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4820 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4821 4822 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4823 * the failed I/O should be retried. 4824 */ 4825 g_opts.bdev_retry_count = 4; 4826 4827 bdev_io->internal.in_submit_request = true; 4828 4829 bdev_nvme_submit_request(ch, bdev_io); 4830 4831 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4832 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4833 4834 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4835 SPDK_CU_ASSERT_FATAL(req != NULL); 4836 4837 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4838 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4839 bio->retry_count = 3; 4840 4841 poll_thread_times(0, 1); 4842 4843 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4844 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4845 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4846 4847 poll_threads(); 4848 4849 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4850 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4851 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4852 4853 free(bdev_io); 4854 4855 spdk_put_io_channel(ch); 4856 4857 poll_threads(); 4858 4859 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4860 CU_ASSERT(rc == 0); 4861 4862 poll_threads(); 4863 spdk_delay_us(1000); 4864 poll_threads(); 4865 4866 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4867 4868 g_opts.bdev_retry_count = 0; 4869 } 4870 4871 static void 4872 test_concurrent_read_ana_log_page(void) 4873 { 4874 struct spdk_nvme_transport_id trid = {}; 4875 struct spdk_nvme_ctrlr *ctrlr; 4876 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4877 struct nvme_ctrlr *nvme_ctrlr; 4878 const int STRING_SIZE = 32; 4879 const char *attached_names[STRING_SIZE]; 4880 int rc; 4881 4882 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4883 ut_init_trid(&trid); 4884 4885 set_thread(0); 4886 4887 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4888 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4889 4890 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4891 4892 g_ut_attach_ctrlr_status = 0; 4893 g_ut_attach_bdev_count = 1; 4894 4895 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4896 attach_ctrlr_done, NULL, &opts, NULL, false); 4897 CU_ASSERT(rc == 0); 4898 4899 spdk_delay_us(1000); 4900 poll_threads(); 4901 4902 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4903 poll_threads(); 4904 4905 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4906 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4907 4908 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4909 4910 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4911 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4912 4913 /* Following read request should be rejected. */ 4914 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4915 4916 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4917 4918 set_thread(1); 4919 4920 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4921 4922 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4923 4924 /* Reset request while reading ANA log page should not be rejected. */ 4925 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4926 CU_ASSERT(rc == 0); 4927 4928 poll_threads(); 4929 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4930 poll_threads(); 4931 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4932 poll_threads(); 4933 4934 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4935 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4936 4937 /* Read ANA log page while resetting ctrlr should be rejected. */ 4938 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4939 CU_ASSERT(rc == 0); 4940 4941 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4942 4943 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4944 4945 poll_threads(); 4946 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4947 poll_threads(); 4948 4949 set_thread(0); 4950 4951 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4952 CU_ASSERT(rc == 0); 4953 4954 poll_threads(); 4955 spdk_delay_us(1000); 4956 poll_threads(); 4957 4958 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4959 } 4960 4961 static void 4962 test_retry_io_for_ana_error(void) 4963 { 4964 struct nvme_path_id path = {}; 4965 struct spdk_nvme_ctrlr *ctrlr; 4966 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4967 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4968 struct nvme_ctrlr *nvme_ctrlr; 4969 const int STRING_SIZE = 32; 4970 const char *attached_names[STRING_SIZE]; 4971 struct nvme_bdev *bdev; 4972 struct nvme_ns *nvme_ns; 4973 struct spdk_bdev_io *bdev_io; 4974 struct nvme_bdev_io *bio; 4975 struct spdk_io_channel *ch; 4976 struct nvme_bdev_channel *nbdev_ch; 4977 struct nvme_io_path *io_path; 4978 struct nvme_qpair *nvme_qpair; 4979 struct ut_nvme_req *req; 4980 uint64_t now; 4981 int rc; 4982 4983 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4984 ut_init_trid(&path.trid); 4985 4986 g_opts.bdev_retry_count = 1; 4987 4988 set_thread(0); 4989 4990 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4991 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4992 4993 g_ut_attach_ctrlr_status = 0; 4994 g_ut_attach_bdev_count = 1; 4995 4996 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4997 attach_ctrlr_done, NULL, &opts, NULL, false); 4998 CU_ASSERT(rc == 0); 4999 5000 spdk_delay_us(1000); 5001 poll_threads(); 5002 5003 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5004 poll_threads(); 5005 5006 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5007 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5008 5009 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 5010 CU_ASSERT(nvme_ctrlr != NULL); 5011 5012 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5013 CU_ASSERT(bdev != NULL); 5014 5015 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5016 CU_ASSERT(nvme_ns != NULL); 5017 5018 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5019 ut_bdev_io_set_buf(bdev_io); 5020 5021 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 5022 5023 ch = spdk_get_io_channel(bdev); 5024 SPDK_CU_ASSERT_FATAL(ch != NULL); 5025 5026 nbdev_ch = spdk_io_channel_get_ctx(ch); 5027 5028 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5029 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5030 5031 nvme_qpair = io_path->qpair; 5032 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5033 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5034 5035 now = spdk_get_ticks(); 5036 5037 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 5038 5039 /* If I/O got ANA error, it should be queued, the corresponding namespace 5040 * should be freezed and its ANA state should be updated. 5041 */ 5042 bdev_io->internal.in_submit_request = true; 5043 5044 bdev_nvme_submit_request(ch, bdev_io); 5045 5046 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5047 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5048 5049 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 5050 SPDK_CU_ASSERT_FATAL(req != NULL); 5051 5052 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5053 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 5054 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5055 5056 poll_thread_times(0, 1); 5057 5058 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5059 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5060 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5061 /* I/O should be retried immediately. */ 5062 CU_ASSERT(bio->retry_ticks == now); 5063 CU_ASSERT(nvme_ns->ana_state_updating == true); 5064 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5065 5066 poll_threads(); 5067 5068 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5069 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5070 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5071 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5072 /* I/O should be retried after a second if no I/O path was found but 5073 * any I/O path may become available. 5074 */ 5075 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5076 5077 /* Namespace should be unfreezed after completing to update its ANA state. */ 5078 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5079 poll_threads(); 5080 5081 CU_ASSERT(nvme_ns->ana_state_updating == false); 5082 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5083 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5084 5085 /* Retry the queued I/O should succeed. */ 5086 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5087 poll_threads(); 5088 5089 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5090 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5091 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5092 5093 free(bdev_io); 5094 5095 spdk_put_io_channel(ch); 5096 5097 poll_threads(); 5098 5099 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5100 CU_ASSERT(rc == 0); 5101 5102 poll_threads(); 5103 spdk_delay_us(1000); 5104 poll_threads(); 5105 5106 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5107 5108 g_opts.bdev_retry_count = 0; 5109 } 5110 5111 static void 5112 test_check_io_error_resiliency_params(void) 5113 { 5114 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5115 * 3rd parameter is fast_io_fail_timeout_sec. 5116 */ 5117 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5118 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5119 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5120 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5121 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5122 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5123 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5124 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5125 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5126 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5127 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5128 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5129 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5130 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5131 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5132 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5133 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5134 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5135 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5136 } 5137 5138 static void 5139 test_retry_io_if_ctrlr_is_resetting(void) 5140 { 5141 struct nvme_path_id path = {}; 5142 struct nvme_ctrlr_opts opts = {}; 5143 struct spdk_nvme_ctrlr *ctrlr; 5144 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5145 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5146 struct nvme_ctrlr *nvme_ctrlr; 5147 const int STRING_SIZE = 32; 5148 const char *attached_names[STRING_SIZE]; 5149 struct nvme_bdev *bdev; 5150 struct nvme_ns *nvme_ns; 5151 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5152 struct spdk_io_channel *ch; 5153 struct nvme_bdev_channel *nbdev_ch; 5154 struct nvme_io_path *io_path; 5155 struct nvme_qpair *nvme_qpair; 5156 int rc; 5157 5158 g_opts.bdev_retry_count = 1; 5159 5160 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5161 ut_init_trid(&path.trid); 5162 5163 set_thread(0); 5164 5165 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5166 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5167 5168 g_ut_attach_ctrlr_status = 0; 5169 g_ut_attach_bdev_count = 1; 5170 5171 opts.ctrlr_loss_timeout_sec = -1; 5172 opts.reconnect_delay_sec = 1; 5173 5174 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5175 attach_ctrlr_done, NULL, &dopts, &opts, false); 5176 CU_ASSERT(rc == 0); 5177 5178 spdk_delay_us(1000); 5179 poll_threads(); 5180 5181 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5182 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5183 5184 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5185 CU_ASSERT(nvme_ctrlr != NULL); 5186 5187 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5188 CU_ASSERT(bdev != NULL); 5189 5190 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5191 CU_ASSERT(nvme_ns != NULL); 5192 5193 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5194 ut_bdev_io_set_buf(bdev_io1); 5195 5196 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5197 ut_bdev_io_set_buf(bdev_io2); 5198 5199 ch = spdk_get_io_channel(bdev); 5200 SPDK_CU_ASSERT_FATAL(ch != NULL); 5201 5202 nbdev_ch = spdk_io_channel_get_ctx(ch); 5203 5204 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5205 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5206 5207 nvme_qpair = io_path->qpair; 5208 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5209 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5210 5211 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5212 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5213 5214 /* If qpair is connected, I/O should succeed. */ 5215 bdev_io1->internal.in_submit_request = true; 5216 5217 bdev_nvme_submit_request(ch, bdev_io1); 5218 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5219 5220 poll_threads(); 5221 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5222 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5223 5224 /* If qpair is disconnected, it is freed and then reconnected via resetting 5225 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5226 * while resetting the nvme_ctrlr. 5227 */ 5228 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5229 ctrlr->is_failed = true; 5230 5231 poll_thread_times(0, 5); 5232 5233 CU_ASSERT(nvme_qpair->qpair == NULL); 5234 CU_ASSERT(nvme_ctrlr->resetting == true); 5235 CU_ASSERT(ctrlr->is_failed == false); 5236 5237 bdev_io1->internal.in_submit_request = true; 5238 5239 bdev_nvme_submit_request(ch, bdev_io1); 5240 5241 spdk_delay_us(1); 5242 5243 bdev_io2->internal.in_submit_request = true; 5244 5245 bdev_nvme_submit_request(ch, bdev_io2); 5246 5247 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5248 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5249 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5250 CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link)); 5251 5252 poll_threads(); 5253 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5254 poll_threads(); 5255 5256 CU_ASSERT(nvme_qpair->qpair != NULL); 5257 CU_ASSERT(nvme_ctrlr->resetting == false); 5258 5259 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5260 5261 poll_thread_times(0, 1); 5262 5263 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5264 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5265 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5266 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5267 5268 poll_threads(); 5269 5270 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5271 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5272 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5273 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5274 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5275 5276 spdk_delay_us(1); 5277 5278 poll_thread_times(0, 1); 5279 5280 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5281 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5282 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5283 5284 poll_threads(); 5285 5286 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5287 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5288 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5289 5290 free(bdev_io1); 5291 free(bdev_io2); 5292 5293 spdk_put_io_channel(ch); 5294 5295 poll_threads(); 5296 5297 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5298 CU_ASSERT(rc == 0); 5299 5300 poll_threads(); 5301 spdk_delay_us(1000); 5302 poll_threads(); 5303 5304 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5305 5306 g_opts.bdev_retry_count = 0; 5307 } 5308 5309 static void 5310 test_reconnect_ctrlr(void) 5311 { 5312 struct spdk_nvme_transport_id trid = {}; 5313 struct spdk_nvme_ctrlr ctrlr = {}; 5314 struct nvme_ctrlr *nvme_ctrlr; 5315 struct spdk_io_channel *ch1, *ch2; 5316 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5317 int rc; 5318 5319 ut_init_trid(&trid); 5320 TAILQ_INIT(&ctrlr.active_io_qpairs); 5321 5322 set_thread(0); 5323 5324 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5325 CU_ASSERT(rc == 0); 5326 5327 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5328 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5329 5330 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5331 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5332 5333 ch1 = spdk_get_io_channel(nvme_ctrlr); 5334 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5335 5336 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5337 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5338 5339 set_thread(1); 5340 5341 ch2 = spdk_get_io_channel(nvme_ctrlr); 5342 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5343 5344 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5345 5346 /* Reset starts from thread 1. */ 5347 set_thread(1); 5348 5349 /* The reset should fail and a reconnect timer should be registered. */ 5350 ctrlr.fail_reset = true; 5351 ctrlr.is_failed = true; 5352 5353 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5354 CU_ASSERT(rc == 0); 5355 CU_ASSERT(nvme_ctrlr->resetting == true); 5356 CU_ASSERT(ctrlr.is_failed == true); 5357 5358 poll_threads(); 5359 5360 CU_ASSERT(nvme_ctrlr->resetting == false); 5361 CU_ASSERT(ctrlr.is_failed == false); 5362 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5363 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5364 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5365 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5366 5367 /* A new reset starts from thread 0. */ 5368 set_thread(1); 5369 5370 /* The reset should cancel the reconnect timer and should start from reconnection. 5371 * Then, the reset should fail and a reconnect timer should be registered again. 5372 */ 5373 ctrlr.fail_reset = true; 5374 ctrlr.is_failed = true; 5375 5376 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5377 CU_ASSERT(rc == 0); 5378 CU_ASSERT(nvme_ctrlr->resetting == true); 5379 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5380 CU_ASSERT(ctrlr.is_failed == true); 5381 5382 poll_threads(); 5383 5384 CU_ASSERT(nvme_ctrlr->resetting == false); 5385 CU_ASSERT(ctrlr.is_failed == false); 5386 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5387 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5388 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5389 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5390 5391 /* Then a reconnect retry should suceeed. */ 5392 ctrlr.fail_reset = false; 5393 5394 spdk_delay_us(SPDK_SEC_TO_USEC); 5395 poll_thread_times(0, 1); 5396 5397 CU_ASSERT(nvme_ctrlr->resetting == true); 5398 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5399 5400 poll_threads(); 5401 5402 CU_ASSERT(nvme_ctrlr->resetting == false); 5403 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5404 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5405 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5406 5407 /* The reset should fail and a reconnect timer should be registered. */ 5408 ctrlr.fail_reset = true; 5409 ctrlr.is_failed = true; 5410 5411 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5412 CU_ASSERT(rc == 0); 5413 CU_ASSERT(nvme_ctrlr->resetting == true); 5414 CU_ASSERT(ctrlr.is_failed == true); 5415 5416 poll_threads(); 5417 5418 CU_ASSERT(nvme_ctrlr->resetting == false); 5419 CU_ASSERT(ctrlr.is_failed == false); 5420 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5421 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5422 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5423 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5424 5425 /* Then a reconnect retry should still fail. */ 5426 spdk_delay_us(SPDK_SEC_TO_USEC); 5427 poll_thread_times(0, 1); 5428 5429 CU_ASSERT(nvme_ctrlr->resetting == true); 5430 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5431 5432 poll_threads(); 5433 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5434 poll_threads(); 5435 5436 CU_ASSERT(nvme_ctrlr->resetting == false); 5437 CU_ASSERT(ctrlr.is_failed == false); 5438 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5439 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5440 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5441 5442 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5443 spdk_delay_us(SPDK_SEC_TO_USEC); 5444 poll_threads(); 5445 5446 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5447 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5448 CU_ASSERT(nvme_ctrlr->destruct == true); 5449 5450 spdk_put_io_channel(ch2); 5451 5452 set_thread(0); 5453 5454 spdk_put_io_channel(ch1); 5455 5456 poll_threads(); 5457 spdk_delay_us(1000); 5458 poll_threads(); 5459 5460 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5461 } 5462 5463 static struct nvme_path_id * 5464 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5465 const struct spdk_nvme_transport_id *trid) 5466 { 5467 struct nvme_path_id *p; 5468 5469 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5470 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5471 break; 5472 } 5473 } 5474 5475 return p; 5476 } 5477 5478 static void 5479 test_retry_failover_ctrlr(void) 5480 { 5481 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5482 struct spdk_nvme_ctrlr ctrlr = {}; 5483 struct nvme_ctrlr *nvme_ctrlr = NULL; 5484 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5485 struct spdk_io_channel *ch; 5486 struct nvme_ctrlr_channel *ctrlr_ch; 5487 int rc; 5488 5489 ut_init_trid(&trid1); 5490 ut_init_trid2(&trid2); 5491 ut_init_trid3(&trid3); 5492 TAILQ_INIT(&ctrlr.active_io_qpairs); 5493 5494 set_thread(0); 5495 5496 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5497 CU_ASSERT(rc == 0); 5498 5499 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5500 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5501 5502 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5503 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5504 5505 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5506 CU_ASSERT(rc == 0); 5507 5508 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5509 CU_ASSERT(rc == 0); 5510 5511 ch = spdk_get_io_channel(nvme_ctrlr); 5512 SPDK_CU_ASSERT_FATAL(ch != NULL); 5513 5514 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5515 5516 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5517 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5518 CU_ASSERT(path_id1->last_failed_tsc == 0); 5519 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5520 5521 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5522 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5523 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5524 5525 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5526 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5527 5528 /* It is expected that connecting both of trid1, trid2, and trid3 fail, 5529 * and a reconnect timer is started. */ 5530 ctrlr.fail_reset = true; 5531 ctrlr.is_failed = true; 5532 5533 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5534 CU_ASSERT(rc == 0); 5535 5536 poll_threads(); 5537 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5538 poll_threads(); 5539 5540 CU_ASSERT(nvme_ctrlr->resetting == false); 5541 CU_ASSERT(ctrlr.is_failed == false); 5542 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5543 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5544 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5545 CU_ASSERT(path_id1->last_failed_tsc != 0); 5546 5547 CU_ASSERT(path_id2->last_failed_tsc != 0); 5548 CU_ASSERT(path_id3->last_failed_tsc != 0); 5549 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5550 5551 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5552 * switched to trid2 but reset is not started. 5553 */ 5554 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5555 CU_ASSERT(rc == -EALREADY); 5556 5557 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5558 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5559 5560 CU_ASSERT(nvme_ctrlr->resetting == false); 5561 5562 /* If reconnect succeeds, trid2 should be the active path_id */ 5563 ctrlr.fail_reset = false; 5564 5565 spdk_delay_us(SPDK_SEC_TO_USEC); 5566 poll_thread_times(0, 1); 5567 5568 CU_ASSERT(nvme_ctrlr->resetting == true); 5569 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5570 5571 poll_threads(); 5572 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5573 poll_threads(); 5574 5575 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5576 CU_ASSERT(path_id2->last_failed_tsc == 0); 5577 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5578 CU_ASSERT(nvme_ctrlr->resetting == false); 5579 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5580 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5581 5582 spdk_put_io_channel(ch); 5583 5584 poll_threads(); 5585 5586 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5587 CU_ASSERT(rc == 0); 5588 5589 poll_threads(); 5590 spdk_delay_us(1000); 5591 poll_threads(); 5592 5593 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5594 } 5595 5596 static void 5597 test_fail_path(void) 5598 { 5599 struct nvme_path_id path = {}; 5600 struct nvme_ctrlr_opts opts = {}; 5601 struct spdk_nvme_ctrlr *ctrlr; 5602 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5603 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5604 struct nvme_ctrlr *nvme_ctrlr; 5605 const int STRING_SIZE = 32; 5606 const char *attached_names[STRING_SIZE]; 5607 struct nvme_bdev *bdev; 5608 struct nvme_ns *nvme_ns; 5609 struct spdk_bdev_io *bdev_io; 5610 struct spdk_io_channel *ch; 5611 struct nvme_bdev_channel *nbdev_ch; 5612 struct nvme_io_path *io_path; 5613 struct nvme_ctrlr_channel *ctrlr_ch; 5614 int rc; 5615 5616 /* The test scenario is the following. 5617 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5618 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5619 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5620 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5621 * comes first. The queued I/O is failed. 5622 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5623 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5624 */ 5625 5626 g_opts.bdev_retry_count = 1; 5627 5628 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5629 ut_init_trid(&path.trid); 5630 5631 set_thread(0); 5632 5633 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5634 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5635 5636 g_ut_attach_ctrlr_status = 0; 5637 g_ut_attach_bdev_count = 1; 5638 5639 opts.ctrlr_loss_timeout_sec = 4; 5640 opts.reconnect_delay_sec = 1; 5641 opts.fast_io_fail_timeout_sec = 2; 5642 5643 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5644 attach_ctrlr_done, NULL, &dopts, &opts, false); 5645 CU_ASSERT(rc == 0); 5646 5647 spdk_delay_us(1000); 5648 poll_threads(); 5649 5650 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5651 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5652 5653 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5654 CU_ASSERT(nvme_ctrlr != NULL); 5655 5656 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5657 CU_ASSERT(bdev != NULL); 5658 5659 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5660 CU_ASSERT(nvme_ns != NULL); 5661 5662 ch = spdk_get_io_channel(bdev); 5663 SPDK_CU_ASSERT_FATAL(ch != NULL); 5664 5665 nbdev_ch = spdk_io_channel_get_ctx(ch); 5666 5667 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5668 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5669 5670 ctrlr_ch = io_path->qpair->ctrlr_ch; 5671 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5672 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5673 5674 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5675 ut_bdev_io_set_buf(bdev_io); 5676 5677 5678 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5679 ctrlr->fail_reset = true; 5680 ctrlr->is_failed = true; 5681 5682 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5683 CU_ASSERT(rc == 0); 5684 CU_ASSERT(nvme_ctrlr->resetting == true); 5685 CU_ASSERT(ctrlr->is_failed == true); 5686 5687 poll_threads(); 5688 5689 CU_ASSERT(nvme_ctrlr->resetting == false); 5690 CU_ASSERT(ctrlr->is_failed == false); 5691 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5692 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5693 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5694 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5695 5696 /* I/O should be queued. */ 5697 bdev_io->internal.in_submit_request = true; 5698 5699 bdev_nvme_submit_request(ch, bdev_io); 5700 5701 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5702 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5703 5704 /* After a second, the I/O should be still queued and the ctrlr should be 5705 * still recovering. 5706 */ 5707 spdk_delay_us(SPDK_SEC_TO_USEC); 5708 poll_threads(); 5709 5710 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5711 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5712 5713 CU_ASSERT(nvme_ctrlr->resetting == false); 5714 CU_ASSERT(ctrlr->is_failed == false); 5715 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5716 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5717 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5718 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5719 5720 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5721 5722 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5723 spdk_delay_us(SPDK_SEC_TO_USEC); 5724 poll_threads(); 5725 5726 CU_ASSERT(nvme_ctrlr->resetting == false); 5727 CU_ASSERT(ctrlr->is_failed == false); 5728 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5729 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5730 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5731 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5732 5733 /* Then within a second, pending I/O should be failed. */ 5734 spdk_delay_us(SPDK_SEC_TO_USEC); 5735 poll_threads(); 5736 5737 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5738 poll_threads(); 5739 5740 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5741 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5742 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5743 5744 /* Another I/O submission should be failed immediately. */ 5745 bdev_io->internal.in_submit_request = true; 5746 5747 bdev_nvme_submit_request(ch, bdev_io); 5748 5749 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5750 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5751 5752 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5753 * be deleted. 5754 */ 5755 spdk_delay_us(SPDK_SEC_TO_USEC); 5756 poll_threads(); 5757 5758 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5759 poll_threads(); 5760 5761 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5762 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5763 CU_ASSERT(nvme_ctrlr->destruct == true); 5764 5765 spdk_put_io_channel(ch); 5766 5767 poll_threads(); 5768 spdk_delay_us(1000); 5769 poll_threads(); 5770 5771 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5772 5773 free(bdev_io); 5774 5775 g_opts.bdev_retry_count = 0; 5776 } 5777 5778 static void 5779 test_nvme_ns_cmp(void) 5780 { 5781 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5782 5783 nvme_ns1.id = 0; 5784 nvme_ns2.id = UINT32_MAX; 5785 5786 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5787 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5788 } 5789 5790 static void 5791 test_ana_transition(void) 5792 { 5793 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5794 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5795 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5796 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5797 5798 /* case 1: ANA transition timedout is canceled. */ 5799 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5800 nvme_ns.ana_transition_timedout = true; 5801 5802 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5803 5804 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5805 5806 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5807 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5808 5809 /* case 2: ANATT timer is kept. */ 5810 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5811 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5812 &nvme_ns, 5813 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5814 5815 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5816 5817 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5818 5819 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5820 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5821 5822 /* case 3: ANATT timer is stopped. */ 5823 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5824 5825 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5826 5827 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5828 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5829 5830 /* ANATT timer is started. */ 5831 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5832 5833 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5834 5835 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5836 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5837 5838 /* ANATT timer is expired. */ 5839 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5840 5841 poll_threads(); 5842 5843 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5844 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5845 } 5846 5847 static void 5848 _set_preferred_path_cb(void *cb_arg, int rc) 5849 { 5850 bool *done = cb_arg; 5851 5852 *done = true; 5853 } 5854 5855 static void 5856 test_set_preferred_path(void) 5857 { 5858 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5859 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5860 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 5861 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5862 const int STRING_SIZE = 32; 5863 const char *attached_names[STRING_SIZE]; 5864 struct nvme_bdev *bdev; 5865 struct spdk_io_channel *ch; 5866 struct nvme_bdev_channel *nbdev_ch; 5867 struct nvme_io_path *io_path; 5868 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5869 const struct spdk_nvme_ctrlr_data *cdata; 5870 bool done; 5871 int rc; 5872 5873 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5874 ut_init_trid(&path1.trid); 5875 ut_init_trid2(&path2.trid); 5876 ut_init_trid3(&path3.trid); 5877 g_ut_attach_ctrlr_status = 0; 5878 g_ut_attach_bdev_count = 1; 5879 5880 set_thread(0); 5881 5882 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5883 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5884 5885 ctrlr1->ns[0].uuid = &uuid1; 5886 5887 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5888 attach_ctrlr_done, NULL, &opts, NULL, true); 5889 CU_ASSERT(rc == 0); 5890 5891 spdk_delay_us(1000); 5892 poll_threads(); 5893 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5894 poll_threads(); 5895 5896 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5897 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5898 5899 ctrlr2->ns[0].uuid = &uuid1; 5900 5901 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5902 attach_ctrlr_done, NULL, &opts, NULL, true); 5903 CU_ASSERT(rc == 0); 5904 5905 spdk_delay_us(1000); 5906 poll_threads(); 5907 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5908 poll_threads(); 5909 5910 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5911 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5912 5913 ctrlr3->ns[0].uuid = &uuid1; 5914 5915 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5916 attach_ctrlr_done, NULL, &opts, NULL, true); 5917 CU_ASSERT(rc == 0); 5918 5919 spdk_delay_us(1000); 5920 poll_threads(); 5921 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5922 poll_threads(); 5923 5924 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5925 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5926 5927 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5928 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5929 5930 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5931 5932 ch = spdk_get_io_channel(bdev); 5933 SPDK_CU_ASSERT_FATAL(ch != NULL); 5934 nbdev_ch = spdk_io_channel_get_ctx(ch); 5935 5936 io_path = bdev_nvme_find_io_path(nbdev_ch); 5937 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5938 5939 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5940 5941 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5942 * should return io_path to ctrlr2. 5943 */ 5944 5945 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5946 done = false; 5947 5948 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5949 5950 poll_threads(); 5951 CU_ASSERT(done == true); 5952 5953 io_path = bdev_nvme_find_io_path(nbdev_ch); 5954 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5955 5956 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5957 5958 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5959 * acquired, find_io_path() should return io_path to ctrlr3. 5960 */ 5961 5962 spdk_put_io_channel(ch); 5963 5964 poll_threads(); 5965 5966 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5967 done = false; 5968 5969 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5970 5971 poll_threads(); 5972 CU_ASSERT(done == true); 5973 5974 ch = spdk_get_io_channel(bdev); 5975 SPDK_CU_ASSERT_FATAL(ch != NULL); 5976 nbdev_ch = spdk_io_channel_get_ctx(ch); 5977 5978 io_path = bdev_nvme_find_io_path(nbdev_ch); 5979 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5980 5981 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 5982 5983 spdk_put_io_channel(ch); 5984 5985 poll_threads(); 5986 5987 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5988 CU_ASSERT(rc == 0); 5989 5990 poll_threads(); 5991 spdk_delay_us(1000); 5992 poll_threads(); 5993 5994 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5995 } 5996 5997 static void 5998 test_find_next_io_path(void) 5999 { 6000 struct nvme_bdev_channel nbdev_ch = { 6001 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6002 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6003 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 6004 }; 6005 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6006 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6007 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6008 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6009 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6010 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6011 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6012 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6013 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6014 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6015 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6016 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6017 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6018 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6019 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6020 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6021 6022 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6023 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6024 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6025 6026 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6027 * is covered in test_find_io_path. 6028 */ 6029 6030 nbdev_ch.current_io_path = &io_path2; 6031 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6032 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6033 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6034 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6035 6036 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6037 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6038 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6039 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6040 6041 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6042 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6043 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6044 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6045 6046 nbdev_ch.current_io_path = &io_path3; 6047 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6048 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6049 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6050 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6051 6052 /* Test if next io_path is selected according to rr_min_io */ 6053 6054 nbdev_ch.current_io_path = NULL; 6055 nbdev_ch.rr_min_io = 2; 6056 nbdev_ch.rr_counter = 0; 6057 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6058 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6059 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6060 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6061 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6062 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6063 6064 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6065 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6066 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6067 } 6068 6069 static void 6070 test_find_io_path_min_qd(void) 6071 { 6072 struct nvme_bdev_channel nbdev_ch = { 6073 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6074 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6075 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6076 }; 6077 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6078 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6079 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6080 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6081 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6082 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6083 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6084 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6085 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6086 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6087 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6088 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6089 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6090 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6091 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6092 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6093 6094 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6095 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6096 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6097 6098 /* Test if the minumum io_outstanding or the ANA optimized state is 6099 * prioritized when using least queue depth selector 6100 */ 6101 qpair1.num_outstanding_reqs = 2; 6102 qpair2.num_outstanding_reqs = 1; 6103 qpair3.num_outstanding_reqs = 0; 6104 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6105 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6106 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6107 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6108 6109 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6110 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6111 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6112 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6113 6114 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6115 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6116 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6117 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6118 6119 qpair2.num_outstanding_reqs = 4; 6120 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6121 } 6122 6123 static void 6124 test_disable_auto_failback(void) 6125 { 6126 struct nvme_path_id path1 = {}, path2 = {}; 6127 struct nvme_ctrlr_opts opts = {}; 6128 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6129 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6130 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6131 struct nvme_ctrlr *nvme_ctrlr1; 6132 const int STRING_SIZE = 32; 6133 const char *attached_names[STRING_SIZE]; 6134 struct nvme_bdev *bdev; 6135 struct spdk_io_channel *ch; 6136 struct nvme_bdev_channel *nbdev_ch; 6137 struct nvme_io_path *io_path; 6138 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6139 const struct spdk_nvme_ctrlr_data *cdata; 6140 bool done; 6141 int rc; 6142 6143 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6144 ut_init_trid(&path1.trid); 6145 ut_init_trid2(&path2.trid); 6146 g_ut_attach_ctrlr_status = 0; 6147 g_ut_attach_bdev_count = 1; 6148 6149 g_opts.disable_auto_failback = true; 6150 6151 opts.ctrlr_loss_timeout_sec = -1; 6152 opts.reconnect_delay_sec = 1; 6153 6154 set_thread(0); 6155 6156 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6157 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6158 6159 ctrlr1->ns[0].uuid = &uuid1; 6160 6161 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6162 attach_ctrlr_done, NULL, &dopts, &opts, true); 6163 CU_ASSERT(rc == 0); 6164 6165 spdk_delay_us(1000); 6166 poll_threads(); 6167 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6168 poll_threads(); 6169 6170 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6171 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6172 6173 ctrlr2->ns[0].uuid = &uuid1; 6174 6175 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6176 attach_ctrlr_done, NULL, &dopts, &opts, true); 6177 CU_ASSERT(rc == 0); 6178 6179 spdk_delay_us(1000); 6180 poll_threads(); 6181 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6182 poll_threads(); 6183 6184 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6185 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6186 6187 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6188 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6189 6190 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn); 6191 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6192 6193 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6194 6195 ch = spdk_get_io_channel(bdev); 6196 SPDK_CU_ASSERT_FATAL(ch != NULL); 6197 nbdev_ch = spdk_io_channel_get_ctx(ch); 6198 6199 io_path = bdev_nvme_find_io_path(nbdev_ch); 6200 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6201 6202 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6203 6204 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6205 ctrlr1->fail_reset = true; 6206 ctrlr1->is_failed = true; 6207 6208 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6209 6210 poll_threads(); 6211 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6212 poll_threads(); 6213 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6214 poll_threads(); 6215 6216 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6217 6218 io_path = bdev_nvme_find_io_path(nbdev_ch); 6219 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6220 6221 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6222 6223 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6224 * Hence, io_path to ctrlr2 should still be used. 6225 */ 6226 ctrlr1->fail_reset = false; 6227 6228 spdk_delay_us(SPDK_SEC_TO_USEC); 6229 poll_threads(); 6230 6231 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6232 6233 io_path = bdev_nvme_find_io_path(nbdev_ch); 6234 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6235 6236 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6237 6238 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6239 * be used again. 6240 */ 6241 6242 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6243 done = false; 6244 6245 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6246 6247 poll_threads(); 6248 CU_ASSERT(done == true); 6249 6250 io_path = bdev_nvme_find_io_path(nbdev_ch); 6251 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6252 6253 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6254 6255 spdk_put_io_channel(ch); 6256 6257 poll_threads(); 6258 6259 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6260 CU_ASSERT(rc == 0); 6261 6262 poll_threads(); 6263 spdk_delay_us(1000); 6264 poll_threads(); 6265 6266 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6267 6268 g_opts.disable_auto_failback = false; 6269 } 6270 6271 static void 6272 ut_set_multipath_policy_done(void *cb_arg, int rc) 6273 { 6274 int *done = cb_arg; 6275 6276 SPDK_CU_ASSERT_FATAL(done != NULL); 6277 *done = rc; 6278 } 6279 6280 static void 6281 test_set_multipath_policy(void) 6282 { 6283 struct nvme_path_id path1 = {}, path2 = {}; 6284 struct nvme_ctrlr_opts opts = {}; 6285 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6286 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6287 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6288 const int STRING_SIZE = 32; 6289 const char *attached_names[STRING_SIZE]; 6290 struct nvme_bdev *bdev; 6291 struct spdk_io_channel *ch; 6292 struct nvme_bdev_channel *nbdev_ch; 6293 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6294 int done; 6295 int rc; 6296 6297 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6298 ut_init_trid(&path1.trid); 6299 ut_init_trid2(&path2.trid); 6300 g_ut_attach_ctrlr_status = 0; 6301 g_ut_attach_bdev_count = 1; 6302 6303 g_opts.disable_auto_failback = true; 6304 6305 opts.ctrlr_loss_timeout_sec = -1; 6306 opts.reconnect_delay_sec = 1; 6307 6308 set_thread(0); 6309 6310 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6311 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6312 6313 ctrlr1->ns[0].uuid = &uuid1; 6314 6315 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6316 attach_ctrlr_done, NULL, &dopts, &opts, true); 6317 CU_ASSERT(rc == 0); 6318 6319 spdk_delay_us(1000); 6320 poll_threads(); 6321 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6322 poll_threads(); 6323 6324 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6325 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6326 6327 ctrlr2->ns[0].uuid = &uuid1; 6328 6329 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6330 attach_ctrlr_done, NULL, &dopts, &opts, true); 6331 CU_ASSERT(rc == 0); 6332 6333 spdk_delay_us(1000); 6334 poll_threads(); 6335 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6336 poll_threads(); 6337 6338 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6339 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6340 6341 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6342 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6343 6344 /* If multipath policy is updated before getting any I/O channel, 6345 * an new I/O channel should have the update. 6346 */ 6347 done = -1; 6348 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6349 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6350 ut_set_multipath_policy_done, &done); 6351 poll_threads(); 6352 CU_ASSERT(done == 0); 6353 6354 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6355 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6356 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6357 6358 ch = spdk_get_io_channel(bdev); 6359 SPDK_CU_ASSERT_FATAL(ch != NULL); 6360 nbdev_ch = spdk_io_channel_get_ctx(ch); 6361 6362 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6363 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6364 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6365 6366 /* If multipath policy is updated while a I/O channel is active, 6367 * the update should be applied to the I/O channel immediately. 6368 */ 6369 done = -1; 6370 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6371 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6372 ut_set_multipath_policy_done, &done); 6373 poll_threads(); 6374 CU_ASSERT(done == 0); 6375 6376 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6377 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6378 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6379 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6380 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6381 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6382 6383 spdk_put_io_channel(ch); 6384 6385 poll_threads(); 6386 6387 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6388 CU_ASSERT(rc == 0); 6389 6390 poll_threads(); 6391 spdk_delay_us(1000); 6392 poll_threads(); 6393 6394 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6395 } 6396 6397 static void 6398 test_uuid_generation(void) 6399 { 6400 uint32_t nsid1 = 1, nsid2 = 2; 6401 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6402 char sn3[21] = " "; 6403 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6404 struct spdk_uuid uuid1, uuid2; 6405 int rc; 6406 6407 /* Test case 1: 6408 * Serial numbers are the same, nsids are different. 6409 * Compare two generated UUID - they should be different. */ 6410 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6411 CU_ASSERT(rc == 0); 6412 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6413 CU_ASSERT(rc == 0); 6414 6415 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6416 6417 /* Test case 2: 6418 * Serial numbers differ only by one character, nsids are the same. 6419 * Compare two generated UUID - they should be different. */ 6420 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6421 CU_ASSERT(rc == 0); 6422 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6423 CU_ASSERT(rc == 0); 6424 6425 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6426 6427 /* Test case 3: 6428 * Serial number comprises only of space characters. 6429 * Validate the generated UUID. */ 6430 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6431 CU_ASSERT(rc == 0); 6432 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6433 6434 } 6435 6436 static void 6437 test_retry_io_to_same_path(void) 6438 { 6439 struct nvme_path_id path1 = {}, path2 = {}; 6440 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6441 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 6442 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6443 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6444 const int STRING_SIZE = 32; 6445 const char *attached_names[STRING_SIZE]; 6446 struct nvme_bdev *bdev; 6447 struct spdk_bdev_io *bdev_io; 6448 struct nvme_bdev_io *bio; 6449 struct spdk_io_channel *ch; 6450 struct nvme_bdev_channel *nbdev_ch; 6451 struct nvme_io_path *io_path1, *io_path2; 6452 struct ut_nvme_req *req; 6453 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6454 int done; 6455 int rc; 6456 6457 g_opts.nvme_ioq_poll_period_us = 1; 6458 6459 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6460 ut_init_trid(&path1.trid); 6461 ut_init_trid2(&path2.trid); 6462 g_ut_attach_ctrlr_status = 0; 6463 g_ut_attach_bdev_count = 1; 6464 6465 set_thread(0); 6466 6467 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6468 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6469 6470 ctrlr1->ns[0].uuid = &uuid1; 6471 6472 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6473 attach_ctrlr_done, NULL, &opts, NULL, true); 6474 CU_ASSERT(rc == 0); 6475 6476 spdk_delay_us(1000); 6477 poll_threads(); 6478 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6479 poll_threads(); 6480 6481 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6482 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6483 6484 ctrlr2->ns[0].uuid = &uuid1; 6485 6486 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6487 attach_ctrlr_done, NULL, &opts, NULL, true); 6488 CU_ASSERT(rc == 0); 6489 6490 spdk_delay_us(1000); 6491 poll_threads(); 6492 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6493 poll_threads(); 6494 6495 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6496 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6497 6498 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 6499 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6500 6501 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 6502 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6503 6504 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6505 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6506 6507 done = -1; 6508 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6509 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6510 poll_threads(); 6511 CU_ASSERT(done == 0); 6512 6513 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6514 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6515 CU_ASSERT(bdev->rr_min_io == 1); 6516 6517 ch = spdk_get_io_channel(bdev); 6518 SPDK_CU_ASSERT_FATAL(ch != NULL); 6519 nbdev_ch = spdk_io_channel_get_ctx(ch); 6520 6521 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6522 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6523 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6524 6525 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6526 ut_bdev_io_set_buf(bdev_io); 6527 6528 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6529 6530 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6531 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6532 6533 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6534 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6535 6536 /* The 1st I/O should be submitted to io_path1. */ 6537 bdev_io->internal.in_submit_request = true; 6538 6539 bdev_nvme_submit_request(ch, bdev_io); 6540 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6541 CU_ASSERT(bio->io_path == io_path1); 6542 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6543 6544 spdk_delay_us(1); 6545 6546 poll_threads(); 6547 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6548 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6549 6550 /* The 2nd I/O should be submitted to io_path2 because the path selection 6551 * policy is round-robin. 6552 */ 6553 bdev_io->internal.in_submit_request = true; 6554 6555 bdev_nvme_submit_request(ch, bdev_io); 6556 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6557 CU_ASSERT(bio->io_path == io_path2); 6558 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6559 6560 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6561 SPDK_CU_ASSERT_FATAL(req != NULL); 6562 6563 /* Set retry count to non-zero. */ 6564 g_opts.bdev_retry_count = 2; 6565 6566 /* Inject an I/O error. */ 6567 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6568 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6569 6570 /* The 2nd I/O should be queued to nbdev_ch. */ 6571 spdk_delay_us(1); 6572 poll_thread_times(0, 1); 6573 6574 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6575 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6576 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6577 6578 /* The 2nd I/O should keep caching io_path2. */ 6579 CU_ASSERT(bio->io_path == io_path2); 6580 6581 /* The 2nd I/O should be submitted to io_path2 again. */ 6582 poll_thread_times(0, 1); 6583 6584 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6585 CU_ASSERT(bio->io_path == io_path2); 6586 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6587 6588 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6589 SPDK_CU_ASSERT_FATAL(req != NULL); 6590 6591 /* Inject an I/O error again. */ 6592 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6593 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6594 req->cpl.status.crd = 1; 6595 6596 ctrlr2->cdata.crdt[1] = 1; 6597 6598 /* The 2nd I/O should be queued to nbdev_ch. */ 6599 spdk_delay_us(1); 6600 poll_thread_times(0, 1); 6601 6602 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6603 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6604 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6605 6606 /* The 2nd I/O should keep caching io_path2. */ 6607 CU_ASSERT(bio->io_path == io_path2); 6608 6609 /* Detach ctrlr2 dynamically. */ 6610 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6611 CU_ASSERT(rc == 0); 6612 6613 spdk_delay_us(1000); 6614 poll_threads(); 6615 spdk_delay_us(1000); 6616 poll_threads(); 6617 spdk_delay_us(1000); 6618 poll_threads(); 6619 spdk_delay_us(1000); 6620 poll_threads(); 6621 6622 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 6623 6624 poll_threads(); 6625 spdk_delay_us(100000); 6626 poll_threads(); 6627 spdk_delay_us(1); 6628 poll_threads(); 6629 6630 /* The 2nd I/O should succeed by io_path1. */ 6631 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6632 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6633 CU_ASSERT(bio->io_path == io_path1); 6634 6635 free(bdev_io); 6636 6637 spdk_put_io_channel(ch); 6638 6639 poll_threads(); 6640 spdk_delay_us(1); 6641 poll_threads(); 6642 6643 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6644 CU_ASSERT(rc == 0); 6645 6646 poll_threads(); 6647 spdk_delay_us(1000); 6648 poll_threads(); 6649 6650 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6651 6652 g_opts.nvme_ioq_poll_period_us = 0; 6653 g_opts.bdev_retry_count = 0; 6654 } 6655 6656 /* This case is to verify a fix for a complex race condition that 6657 * failover is lost if fabric connect command gets timeout while 6658 * controller is being reset. 6659 */ 6660 static void 6661 test_race_between_reset_and_disconnected(void) 6662 { 6663 struct spdk_nvme_transport_id trid = {}; 6664 struct spdk_nvme_ctrlr ctrlr = {}; 6665 struct nvme_ctrlr *nvme_ctrlr = NULL; 6666 struct nvme_path_id *curr_trid; 6667 struct spdk_io_channel *ch1, *ch2; 6668 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6669 int rc; 6670 6671 ut_init_trid(&trid); 6672 TAILQ_INIT(&ctrlr.active_io_qpairs); 6673 6674 set_thread(0); 6675 6676 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6677 CU_ASSERT(rc == 0); 6678 6679 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6680 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6681 6682 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6683 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6684 6685 ch1 = spdk_get_io_channel(nvme_ctrlr); 6686 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6687 6688 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6689 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6690 6691 set_thread(1); 6692 6693 ch2 = spdk_get_io_channel(nvme_ctrlr); 6694 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6695 6696 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6697 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6698 6699 /* Reset starts from thread 1. */ 6700 set_thread(1); 6701 6702 nvme_ctrlr->resetting = false; 6703 curr_trid->last_failed_tsc = spdk_get_ticks(); 6704 ctrlr.is_failed = true; 6705 6706 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6707 CU_ASSERT(rc == 0); 6708 CU_ASSERT(nvme_ctrlr->resetting == true); 6709 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6710 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6711 6712 poll_thread_times(0, 3); 6713 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6714 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6715 6716 poll_thread_times(0, 1); 6717 poll_thread_times(1, 1); 6718 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6719 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6720 CU_ASSERT(ctrlr.is_failed == true); 6721 6722 poll_thread_times(1, 1); 6723 poll_thread_times(0, 1); 6724 CU_ASSERT(ctrlr.is_failed == false); 6725 CU_ASSERT(ctrlr.adminq.is_connected == false); 6726 6727 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6728 poll_thread_times(0, 2); 6729 CU_ASSERT(ctrlr.adminq.is_connected == true); 6730 6731 poll_thread_times(0, 1); 6732 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6733 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6734 6735 poll_thread_times(1, 1); 6736 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6737 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6738 CU_ASSERT(nvme_ctrlr->resetting == true); 6739 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6740 6741 poll_thread_times(0, 2); 6742 CU_ASSERT(nvme_ctrlr->resetting == true); 6743 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6744 poll_thread_times(1, 1); 6745 CU_ASSERT(nvme_ctrlr->resetting == true); 6746 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6747 6748 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6749 * 6750 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric 6751 * connect command is executed. If fabric connect command gets timeout, 6752 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6753 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6754 * 6755 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 6756 */ 6757 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6758 CU_ASSERT(rc == -EINPROGRESS); 6759 CU_ASSERT(nvme_ctrlr->resetting == true); 6760 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6761 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6762 6763 poll_thread_times(0, 1); 6764 6765 CU_ASSERT(nvme_ctrlr->resetting == true); 6766 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6767 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6768 6769 poll_threads(); 6770 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6771 poll_threads(); 6772 6773 CU_ASSERT(nvme_ctrlr->resetting == false); 6774 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6775 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6776 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6777 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6778 6779 spdk_put_io_channel(ch2); 6780 6781 set_thread(0); 6782 6783 spdk_put_io_channel(ch1); 6784 6785 poll_threads(); 6786 6787 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6788 CU_ASSERT(rc == 0); 6789 6790 poll_threads(); 6791 spdk_delay_us(1000); 6792 poll_threads(); 6793 6794 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6795 } 6796 static void 6797 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6798 { 6799 int *_rc = (int *)cb_arg; 6800 6801 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6802 *_rc = rc; 6803 } 6804 6805 static void 6806 test_ctrlr_op_rpc(void) 6807 { 6808 struct spdk_nvme_transport_id trid = {}; 6809 struct spdk_nvme_ctrlr ctrlr = {}; 6810 struct nvme_ctrlr *nvme_ctrlr = NULL; 6811 struct nvme_path_id *curr_trid; 6812 struct spdk_io_channel *ch1, *ch2; 6813 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6814 int ctrlr_op_rc; 6815 int rc; 6816 6817 ut_init_trid(&trid); 6818 TAILQ_INIT(&ctrlr.active_io_qpairs); 6819 6820 set_thread(0); 6821 6822 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6823 CU_ASSERT(rc == 0); 6824 6825 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6826 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6827 6828 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6829 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6830 6831 ch1 = spdk_get_io_channel(nvme_ctrlr); 6832 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6833 6834 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6835 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6836 6837 set_thread(1); 6838 6839 ch2 = spdk_get_io_channel(nvme_ctrlr); 6840 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6841 6842 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6843 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6844 6845 /* Reset starts from thread 1. */ 6846 set_thread(1); 6847 6848 /* Case 1: ctrlr is already being destructed. */ 6849 nvme_ctrlr->destruct = true; 6850 ctrlr_op_rc = 0; 6851 6852 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6853 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6854 6855 poll_threads(); 6856 6857 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6858 6859 /* Case 2: reset is in progress. */ 6860 nvme_ctrlr->destruct = false; 6861 nvme_ctrlr->resetting = true; 6862 ctrlr_op_rc = 0; 6863 6864 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6865 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6866 6867 poll_threads(); 6868 6869 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6870 6871 /* Case 3: reset completes successfully. */ 6872 nvme_ctrlr->resetting = false; 6873 curr_trid->last_failed_tsc = spdk_get_ticks(); 6874 ctrlr.is_failed = true; 6875 ctrlr_op_rc = -1; 6876 6877 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6878 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6879 6880 CU_ASSERT(nvme_ctrlr->resetting == true); 6881 CU_ASSERT(ctrlr_op_rc == -1); 6882 6883 poll_threads(); 6884 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6885 poll_threads(); 6886 6887 CU_ASSERT(nvme_ctrlr->resetting == false); 6888 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6889 CU_ASSERT(ctrlr.is_failed == false); 6890 CU_ASSERT(ctrlr_op_rc == 0); 6891 6892 /* Case 4: invalid operation. */ 6893 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6894 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6895 6896 poll_threads(); 6897 6898 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6899 6900 spdk_put_io_channel(ch2); 6901 6902 set_thread(0); 6903 6904 spdk_put_io_channel(ch1); 6905 6906 poll_threads(); 6907 6908 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6909 CU_ASSERT(rc == 0); 6910 6911 poll_threads(); 6912 spdk_delay_us(1000); 6913 poll_threads(); 6914 6915 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6916 } 6917 6918 static void 6919 test_bdev_ctrlr_op_rpc(void) 6920 { 6921 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6922 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6923 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6924 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6925 struct nvme_path_id *curr_trid1, *curr_trid2; 6926 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6927 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6928 int ctrlr_op_rc; 6929 int rc; 6930 6931 ut_init_trid(&trid1); 6932 ut_init_trid2(&trid2); 6933 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6934 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6935 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6936 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6937 ctrlr1.cdata.cntlid = 1; 6938 ctrlr2.cdata.cntlid = 2; 6939 ctrlr1.adminq.is_connected = true; 6940 ctrlr2.adminq.is_connected = true; 6941 6942 set_thread(0); 6943 6944 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6945 CU_ASSERT(rc == 0); 6946 6947 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6948 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6949 6950 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN); 6951 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6952 6953 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6954 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6955 6956 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6957 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6958 6959 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6960 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6961 6962 set_thread(1); 6963 6964 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6965 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6966 6967 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6968 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6969 6970 set_thread(0); 6971 6972 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6973 CU_ASSERT(rc == 0); 6974 6975 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN); 6976 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6977 6978 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 6979 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 6980 6981 ch21 = spdk_get_io_channel(nvme_ctrlr2); 6982 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 6983 6984 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 6985 CU_ASSERT(ctrlr_ch21->qpair != NULL); 6986 6987 set_thread(1); 6988 6989 ch22 = spdk_get_io_channel(nvme_ctrlr2); 6990 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 6991 6992 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 6993 CU_ASSERT(ctrlr_ch22->qpair != NULL); 6994 6995 /* Reset starts from thread 1. */ 6996 set_thread(1); 6997 6998 nvme_ctrlr1->resetting = false; 6999 nvme_ctrlr2->resetting = false; 7000 curr_trid1->last_failed_tsc = spdk_get_ticks(); 7001 curr_trid2->last_failed_tsc = spdk_get_ticks(); 7002 ctrlr_op_rc = -1; 7003 7004 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 7005 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 7006 7007 CU_ASSERT(nvme_ctrlr1->resetting == true); 7008 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7009 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7010 CU_ASSERT(nvme_ctrlr2->resetting == false); 7011 7012 poll_thread_times(0, 3); 7013 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7014 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7015 7016 poll_thread_times(0, 1); 7017 poll_thread_times(1, 1); 7018 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7019 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7020 7021 poll_thread_times(1, 1); 7022 poll_thread_times(0, 1); 7023 CU_ASSERT(ctrlr1.adminq.is_connected == false); 7024 7025 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7026 poll_thread_times(0, 2); 7027 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7028 7029 poll_thread_times(0, 1); 7030 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7031 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7032 7033 poll_thread_times(1, 1); 7034 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7035 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7036 CU_ASSERT(nvme_ctrlr1->resetting == true); 7037 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7038 7039 poll_thread_times(0, 2); 7040 poll_thread_times(1, 1); 7041 poll_thread_times(0, 1); 7042 poll_thread_times(1, 1); 7043 poll_thread_times(0, 1); 7044 poll_thread_times(1, 1); 7045 poll_thread_times(0, 1); 7046 7047 CU_ASSERT(nvme_ctrlr1->resetting == false); 7048 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7049 CU_ASSERT(nvme_ctrlr2->resetting == true); 7050 7051 poll_threads(); 7052 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7053 poll_threads(); 7054 7055 CU_ASSERT(nvme_ctrlr2->resetting == false); 7056 CU_ASSERT(ctrlr_op_rc == 0); 7057 7058 set_thread(1); 7059 7060 spdk_put_io_channel(ch12); 7061 spdk_put_io_channel(ch22); 7062 7063 set_thread(0); 7064 7065 spdk_put_io_channel(ch11); 7066 spdk_put_io_channel(ch21); 7067 7068 poll_threads(); 7069 7070 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7071 CU_ASSERT(rc == 0); 7072 7073 poll_threads(); 7074 spdk_delay_us(1000); 7075 poll_threads(); 7076 7077 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7078 } 7079 7080 static void 7081 test_disable_enable_ctrlr(void) 7082 { 7083 struct spdk_nvme_transport_id trid = {}; 7084 struct spdk_nvme_ctrlr ctrlr = {}; 7085 struct nvme_ctrlr *nvme_ctrlr = NULL; 7086 struct nvme_path_id *curr_trid; 7087 struct spdk_io_channel *ch1, *ch2; 7088 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7089 int rc; 7090 7091 ut_init_trid(&trid); 7092 TAILQ_INIT(&ctrlr.active_io_qpairs); 7093 ctrlr.adminq.is_connected = true; 7094 7095 set_thread(0); 7096 7097 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7098 CU_ASSERT(rc == 0); 7099 7100 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7101 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7102 7103 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7104 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7105 7106 ch1 = spdk_get_io_channel(nvme_ctrlr); 7107 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7108 7109 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7110 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7111 7112 set_thread(1); 7113 7114 ch2 = spdk_get_io_channel(nvme_ctrlr); 7115 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7116 7117 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7118 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7119 7120 /* Disable starts from thread 1. */ 7121 set_thread(1); 7122 7123 /* Case 1: ctrlr is already disabled. */ 7124 nvme_ctrlr->disabled = true; 7125 7126 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7127 CU_ASSERT(rc == -EALREADY); 7128 7129 /* Case 2: ctrlr is already being destructed. */ 7130 nvme_ctrlr->disabled = false; 7131 nvme_ctrlr->destruct = true; 7132 7133 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7134 CU_ASSERT(rc == -ENXIO); 7135 7136 /* Case 3: reset is in progress. */ 7137 nvme_ctrlr->destruct = false; 7138 nvme_ctrlr->resetting = true; 7139 7140 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7141 CU_ASSERT(rc == -EBUSY); 7142 7143 /* Case 4: disable completes successfully. */ 7144 nvme_ctrlr->resetting = false; 7145 7146 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7147 CU_ASSERT(rc == 0); 7148 CU_ASSERT(nvme_ctrlr->resetting == true); 7149 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7150 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7151 7152 poll_thread_times(0, 3); 7153 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7154 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7155 7156 poll_thread_times(0, 1); 7157 poll_thread_times(1, 1); 7158 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7159 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7160 7161 poll_thread_times(1, 1); 7162 poll_thread_times(0, 1); 7163 CU_ASSERT(ctrlr.adminq.is_connected == false); 7164 poll_thread_times(1, 1); 7165 poll_thread_times(0, 1); 7166 poll_thread_times(1, 1); 7167 poll_thread_times(0, 1); 7168 CU_ASSERT(nvme_ctrlr->resetting == false); 7169 CU_ASSERT(nvme_ctrlr->disabled == true); 7170 7171 /* Case 5: enable completes successfully. */ 7172 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7173 CU_ASSERT(rc == 0); 7174 7175 CU_ASSERT(nvme_ctrlr->resetting == true); 7176 CU_ASSERT(nvme_ctrlr->disabled == false); 7177 7178 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7179 poll_thread_times(0, 2); 7180 CU_ASSERT(ctrlr.adminq.is_connected == true); 7181 7182 poll_thread_times(0, 1); 7183 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7184 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7185 7186 poll_thread_times(1, 1); 7187 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7188 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7189 CU_ASSERT(nvme_ctrlr->resetting == true); 7190 7191 poll_thread_times(0, 2); 7192 CU_ASSERT(nvme_ctrlr->resetting == true); 7193 poll_thread_times(1, 1); 7194 CU_ASSERT(nvme_ctrlr->resetting == true); 7195 poll_thread_times(0, 1); 7196 CU_ASSERT(nvme_ctrlr->resetting == false); 7197 7198 /* Case 6: ctrlr is already enabled. */ 7199 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7200 CU_ASSERT(rc == -EALREADY); 7201 7202 set_thread(0); 7203 7204 /* Case 7: disable cancels delayed reconnect. */ 7205 nvme_ctrlr->opts.reconnect_delay_sec = 10; 7206 ctrlr.fail_reset = true; 7207 7208 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7209 CU_ASSERT(rc == 0); 7210 7211 poll_threads(); 7212 7213 CU_ASSERT(nvme_ctrlr->resetting == false); 7214 CU_ASSERT(ctrlr.is_failed == false); 7215 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7216 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7217 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 7218 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 7219 7220 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7221 CU_ASSERT(rc == 0); 7222 7223 CU_ASSERT(nvme_ctrlr->resetting == true); 7224 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 7225 7226 poll_threads(); 7227 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7228 poll_threads(); 7229 7230 CU_ASSERT(nvme_ctrlr->resetting == false); 7231 CU_ASSERT(nvme_ctrlr->disabled == true); 7232 7233 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7234 CU_ASSERT(rc == 0); 7235 7236 CU_ASSERT(nvme_ctrlr->resetting == true); 7237 CU_ASSERT(nvme_ctrlr->disabled == false); 7238 7239 poll_threads(); 7240 7241 CU_ASSERT(nvme_ctrlr->resetting == false); 7242 7243 set_thread(1); 7244 7245 spdk_put_io_channel(ch2); 7246 7247 set_thread(0); 7248 7249 spdk_put_io_channel(ch1); 7250 7251 poll_threads(); 7252 7253 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7254 CU_ASSERT(rc == 0); 7255 7256 poll_threads(); 7257 spdk_delay_us(1000); 7258 poll_threads(); 7259 7260 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7261 } 7262 7263 static void 7264 ut_delete_done(void *ctx, int rc) 7265 { 7266 int *delete_done_rc = ctx; 7267 *delete_done_rc = rc; 7268 } 7269 7270 static void 7271 test_delete_ctrlr_done(void) 7272 { 7273 struct spdk_nvme_transport_id trid = {}; 7274 struct spdk_nvme_ctrlr ctrlr = {}; 7275 int delete_done_rc = 0xDEADBEEF; 7276 int rc; 7277 7278 ut_init_trid(&trid); 7279 7280 nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7281 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 7282 7283 rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc); 7284 CU_ASSERT(rc == 0); 7285 7286 for (int i = 0; i < 20; i++) { 7287 poll_threads(); 7288 if (delete_done_rc == 0) { 7289 break; 7290 } 7291 spdk_delay_us(1000); 7292 } 7293 7294 CU_ASSERT(delete_done_rc == 0); 7295 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7296 } 7297 7298 static void 7299 test_ns_remove_during_reset(void) 7300 { 7301 struct nvme_path_id path = {}; 7302 struct nvme_ctrlr_opts opts = {}; 7303 struct spdk_nvme_ctrlr *ctrlr; 7304 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 7305 struct nvme_bdev_ctrlr *nbdev_ctrlr; 7306 struct nvme_ctrlr *nvme_ctrlr; 7307 const int STRING_SIZE = 32; 7308 const char *attached_names[STRING_SIZE]; 7309 struct nvme_bdev *bdev; 7310 struct nvme_ns *nvme_ns; 7311 union spdk_nvme_async_event_completion event = {}; 7312 struct spdk_nvme_cpl cpl = {}; 7313 int rc; 7314 7315 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 7316 ut_init_trid(&path.trid); 7317 7318 set_thread(0); 7319 7320 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 7321 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 7322 7323 g_ut_attach_ctrlr_status = 0; 7324 g_ut_attach_bdev_count = 1; 7325 7326 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 7327 attach_ctrlr_done, NULL, &dopts, &opts, false); 7328 CU_ASSERT(rc == 0); 7329 7330 spdk_delay_us(1000); 7331 poll_threads(); 7332 7333 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7334 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7335 7336 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 7337 CU_ASSERT(nvme_ctrlr != NULL); 7338 7339 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 7340 CU_ASSERT(bdev != NULL); 7341 7342 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 7343 CU_ASSERT(nvme_ns != NULL); 7344 7345 /* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist, 7346 * but nvme_ns->ns should be NULL. 7347 */ 7348 7349 CU_ASSERT(ctrlr->ns[0].is_active == true); 7350 ctrlr->ns[0].is_active = false; 7351 7352 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7353 CU_ASSERT(rc == 0); 7354 7355 poll_threads(); 7356 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7357 poll_threads(); 7358 7359 CU_ASSERT(nvme_ctrlr->resetting == false); 7360 CU_ASSERT(ctrlr->adminq.is_connected == true); 7361 7362 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7363 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7364 CU_ASSERT(nvme_ns->bdev == bdev); 7365 CU_ASSERT(nvme_ns->ns == NULL); 7366 7367 /* Then, async event should fill nvme_ns->ns again. */ 7368 7369 ctrlr->ns[0].is_active = true; 7370 7371 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 7372 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 7373 cpl.cdw0 = event.raw; 7374 7375 aer_cb(nvme_ctrlr, &cpl); 7376 7377 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7378 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7379 CU_ASSERT(nvme_ns->bdev == bdev); 7380 CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]); 7381 7382 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7383 CU_ASSERT(rc == 0); 7384 7385 poll_threads(); 7386 spdk_delay_us(1000); 7387 poll_threads(); 7388 7389 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7390 } 7391 7392 static void 7393 test_io_path_is_current(void) 7394 { 7395 struct nvme_bdev_channel nbdev_ch = { 7396 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 7397 }; 7398 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 7399 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 7400 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 7401 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }, 7402 nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 7403 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {}; 7404 struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 7405 struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 7406 struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, }; 7407 struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, }; 7408 struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, }; 7409 struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, }; 7410 struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 7411 struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 7412 struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 7413 7414 /* io_path1 is deleting */ 7415 io_path1.nbdev_ch = NULL; 7416 7417 CU_ASSERT(nvme_io_path_is_current(&io_path1) == false); 7418 7419 io_path1.nbdev_ch = &nbdev_ch; 7420 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 7421 io_path2.nbdev_ch = &nbdev_ch; 7422 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 7423 io_path3.nbdev_ch = &nbdev_ch; 7424 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 7425 7426 /* active/active: io_path is current if it is available and ANA optimized. */ 7427 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7428 7429 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7430 7431 /* active/active: io_path is not current if it is disconnected even if it is 7432 * ANA optimized. 7433 */ 7434 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7435 7436 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7437 7438 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 7439 7440 /* active/passive: io_path is current if it is available and cached. 7441 * (only ANA optimized path is cached for active/passive.) 7442 */ 7443 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7444 nbdev_ch.current_io_path = &io_path2; 7445 7446 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7447 7448 /* active:passive: io_path is not current if it is disconnected even if it is cached */ 7449 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7450 7451 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7452 7453 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 7454 7455 /* active/active and active/passive: io_path is not current if it is ANA inaccessible. */ 7456 nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 7457 7458 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7459 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7460 7461 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7462 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7463 7464 /* active/active: non-optimized path is current only if there is no optimized path. */ 7465 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7466 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7467 7468 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7469 7470 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7471 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7472 7473 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7474 7475 /* active/passive: current is true if it is the first one when there is no optimized path. */ 7476 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7477 nbdev_ch.current_io_path = NULL; 7478 7479 CU_ASSERT(nvme_io_path_is_current(&io_path1) == true); 7480 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7481 CU_ASSERT(nvme_io_path_is_current(&io_path3) == false); 7482 } 7483 7484 int 7485 main(int argc, char **argv) 7486 { 7487 CU_pSuite suite = NULL; 7488 unsigned int num_failures; 7489 7490 CU_initialize_registry(); 7491 7492 suite = CU_add_suite("nvme", NULL, NULL); 7493 7494 CU_ADD_TEST(suite, test_create_ctrlr); 7495 CU_ADD_TEST(suite, test_reset_ctrlr); 7496 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 7497 CU_ADD_TEST(suite, test_failover_ctrlr); 7498 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 7499 CU_ADD_TEST(suite, test_pending_reset); 7500 CU_ADD_TEST(suite, test_attach_ctrlr); 7501 CU_ADD_TEST(suite, test_aer_cb); 7502 CU_ADD_TEST(suite, test_submit_nvme_cmd); 7503 CU_ADD_TEST(suite, test_add_remove_trid); 7504 CU_ADD_TEST(suite, test_abort); 7505 CU_ADD_TEST(suite, test_get_io_qpair); 7506 CU_ADD_TEST(suite, test_bdev_unregister); 7507 CU_ADD_TEST(suite, test_compare_ns); 7508 CU_ADD_TEST(suite, test_init_ana_log_page); 7509 CU_ADD_TEST(suite, test_get_memory_domains); 7510 CU_ADD_TEST(suite, test_reconnect_qpair); 7511 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 7512 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 7513 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 7514 CU_ADD_TEST(suite, test_admin_path); 7515 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 7516 CU_ADD_TEST(suite, test_find_io_path); 7517 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 7518 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 7519 CU_ADD_TEST(suite, test_retry_io_count); 7520 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 7521 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 7522 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 7523 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 7524 CU_ADD_TEST(suite, test_reconnect_ctrlr); 7525 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 7526 CU_ADD_TEST(suite, test_fail_path); 7527 CU_ADD_TEST(suite, test_nvme_ns_cmp); 7528 CU_ADD_TEST(suite, test_ana_transition); 7529 CU_ADD_TEST(suite, test_set_preferred_path); 7530 CU_ADD_TEST(suite, test_find_next_io_path); 7531 CU_ADD_TEST(suite, test_find_io_path_min_qd); 7532 CU_ADD_TEST(suite, test_disable_auto_failback); 7533 CU_ADD_TEST(suite, test_set_multipath_policy); 7534 CU_ADD_TEST(suite, test_uuid_generation); 7535 CU_ADD_TEST(suite, test_retry_io_to_same_path); 7536 CU_ADD_TEST(suite, test_race_between_reset_and_disconnected); 7537 CU_ADD_TEST(suite, test_ctrlr_op_rpc); 7538 CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc); 7539 CU_ADD_TEST(suite, test_disable_enable_ctrlr); 7540 CU_ADD_TEST(suite, test_delete_ctrlr_done); 7541 CU_ADD_TEST(suite, test_ns_remove_during_reset); 7542 CU_ADD_TEST(suite, test_io_path_is_current); 7543 7544 allocate_threads(3); 7545 set_thread(0); 7546 bdev_nvme_library_init(); 7547 init_accel(); 7548 7549 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7550 7551 set_thread(0); 7552 bdev_nvme_library_fini(); 7553 fini_accel(); 7554 free_threads(); 7555 7556 CU_cleanup_registry(); 7557 7558 return num_failures; 7559 } 7560