1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 12 #include "common/lib/ut_multithread.c" 13 14 #include "bdev/nvme/bdev_nvme.c" 15 16 #include "unit/lib/json_mock.c" 17 18 #include "bdev/nvme/bdev_mdns_client.c" 19 20 static void *g_accel_p = (void *)0xdeadbeaf; 21 22 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 23 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 24 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 25 spdk_nvme_remove_cb remove_cb), NULL); 26 27 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 28 enum spdk_nvme_transport_type trtype)); 29 30 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 31 NULL); 32 33 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 34 35 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 36 struct spdk_nvme_transport_id *trid), 0); 37 38 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 39 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 40 41 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 42 DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0); 43 44 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 45 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 51 52 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 53 int error_code, const char *msg)); 54 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 55 (struct spdk_jsonrpc_request *request), NULL); 56 DEFINE_STUB_V(spdk_jsonrpc_end_result, 57 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 58 59 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 60 size_t opts_size)); 61 62 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 63 size_t opts_size), 0); 64 DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL); 65 DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL); 66 67 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 68 69 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 70 enum spdk_bdev_reset_stat_mode mode)); 71 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 72 struct spdk_bdev_io_stat *add)); 73 74 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr)); 75 DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL); 76 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k)); 77 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL); 78 79 int 80 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 81 struct spdk_memory_domain **domains, int array_size) 82 { 83 int i, min_array_size; 84 85 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 86 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 87 for (i = 0; i < min_array_size; i++) { 88 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 89 } 90 } 91 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 92 93 return 0; 94 } 95 96 struct spdk_io_channel * 97 spdk_accel_get_io_channel(void) 98 { 99 return spdk_get_io_channel(g_accel_p); 100 } 101 102 void 103 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 104 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 105 { 106 /* Avoid warning that opts is used uninitialised */ 107 memset(opts, 0, opts_size); 108 } 109 110 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 111 (struct spdk_nvme_ctrlr *ctrlr), NULL); 112 113 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 114 (const struct spdk_nvme_ctrlr *ctrlr), 0); 115 116 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 117 (struct spdk_nvme_ctrlr *ctrlr), NULL); 118 119 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 120 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 121 122 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 123 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 124 125 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 126 127 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 128 129 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 130 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 131 132 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 133 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 134 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 135 136 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 137 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 138 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 139 140 DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, ( 141 struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 142 struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf, 143 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 144 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 145 spdk_nvme_req_next_sge_cb next_sge_fn), 0); 146 147 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 148 size_t *size), 0); 149 150 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 151 152 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 153 154 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 155 156 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 157 158 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 159 160 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 161 162 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 163 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 164 165 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 166 167 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 168 char *name, size_t *size), 0); 169 170 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 171 (struct spdk_nvme_ns *ns), 0); 172 173 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 174 (const struct spdk_nvme_ctrlr *ctrlr), 0); 175 176 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 177 (struct spdk_nvme_ns *ns), 0); 178 179 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 180 (struct spdk_nvme_ns *ns), 0); 181 182 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 183 (struct spdk_nvme_ns *ns), 0); 184 185 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 186 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 187 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 188 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 189 190 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 191 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 192 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 193 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 194 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 195 196 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 197 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 198 void *payload, uint32_t payload_size, uint64_t slba, 199 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 200 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 201 202 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 203 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 204 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 205 206 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 207 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 208 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 209 210 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 211 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 212 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 213 214 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 215 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 216 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 217 218 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 219 220 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 221 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 222 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 223 224 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 225 (const struct spdk_nvme_status *status), NULL); 226 227 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 228 (const struct spdk_nvme_status *status), NULL); 229 230 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 231 232 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 233 234 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 235 236 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 237 238 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 239 240 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 241 struct iovec *iov, 242 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 243 DEFINE_STUB(spdk_accel_append_crc32c, int, 244 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst, 245 struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx, 246 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 247 DEFINE_STUB_V(spdk_accel_sequence_finish, 248 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 249 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 250 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 251 252 struct ut_nvme_req { 253 uint16_t opc; 254 spdk_nvme_cmd_cb cb_fn; 255 void *cb_arg; 256 struct spdk_nvme_cpl cpl; 257 TAILQ_ENTRY(ut_nvme_req) tailq; 258 }; 259 260 struct spdk_nvme_ns { 261 struct spdk_nvme_ctrlr *ctrlr; 262 uint32_t id; 263 bool is_active; 264 struct spdk_uuid *uuid; 265 enum spdk_nvme_ana_state ana_state; 266 enum spdk_nvme_csi csi; 267 }; 268 269 struct spdk_nvme_qpair { 270 struct spdk_nvme_ctrlr *ctrlr; 271 uint8_t failure_reason; 272 bool is_connected; 273 bool in_completion_context; 274 bool delete_after_completion_context; 275 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 276 uint32_t num_outstanding_reqs; 277 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 278 struct spdk_nvme_poll_group *poll_group; 279 void *poll_group_tailq_head; 280 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 281 }; 282 283 struct spdk_nvme_ctrlr { 284 uint32_t num_ns; 285 struct spdk_nvme_ns *ns; 286 struct spdk_nvme_ns_data *nsdata; 287 struct spdk_nvme_qpair adminq; 288 struct spdk_nvme_ctrlr_data cdata; 289 bool attached; 290 bool is_failed; 291 bool fail_reset; 292 bool is_removed; 293 struct spdk_nvme_transport_id trid; 294 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 295 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 296 struct spdk_nvme_ctrlr_opts opts; 297 }; 298 299 struct spdk_nvme_poll_group { 300 void *ctx; 301 struct spdk_nvme_accel_fn_table accel_fn_table; 302 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 303 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 304 }; 305 306 struct spdk_nvme_probe_ctx { 307 struct spdk_nvme_transport_id trid; 308 void *cb_ctx; 309 spdk_nvme_attach_cb attach_cb; 310 struct spdk_nvme_ctrlr *init_ctrlr; 311 }; 312 313 uint32_t 314 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 315 { 316 uint32_t nsid; 317 318 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 319 if (ctrlr->ns[nsid - 1].is_active) { 320 return nsid; 321 } 322 } 323 324 return 0; 325 } 326 327 uint32_t 328 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 329 { 330 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 331 if (ctrlr->ns[nsid - 1].is_active) { 332 return nsid; 333 } 334 } 335 336 return 0; 337 } 338 339 uint32_t 340 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 341 { 342 return qpair->num_outstanding_reqs; 343 } 344 345 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 346 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 347 g_ut_attached_ctrlrs); 348 static int g_ut_attach_ctrlr_status; 349 static size_t g_ut_attach_bdev_count; 350 static int g_ut_register_bdev_status; 351 static struct spdk_bdev *g_ut_registered_bdev; 352 static uint16_t g_ut_cntlid; 353 static struct nvme_path_id g_any_path = {}; 354 355 static void 356 ut_init_trid(struct spdk_nvme_transport_id *trid) 357 { 358 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 359 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 360 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 361 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 362 } 363 364 static void 365 ut_init_trid2(struct spdk_nvme_transport_id *trid) 366 { 367 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 368 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 369 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 370 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 371 } 372 373 static void 374 ut_init_trid3(struct spdk_nvme_transport_id *trid) 375 { 376 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 377 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 378 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 379 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 380 } 381 382 static int 383 cmp_int(int a, int b) 384 { 385 return a - b; 386 } 387 388 int 389 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 390 const struct spdk_nvme_transport_id *trid2) 391 { 392 int cmp; 393 394 /* We assume trtype is TCP for now. */ 395 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 396 397 cmp = cmp_int(trid1->trtype, trid2->trtype); 398 if (cmp) { 399 return cmp; 400 } 401 402 cmp = strcasecmp(trid1->traddr, trid2->traddr); 403 if (cmp) { 404 return cmp; 405 } 406 407 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 408 if (cmp) { 409 return cmp; 410 } 411 412 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 413 if (cmp) { 414 return cmp; 415 } 416 417 cmp = strcmp(trid1->subnqn, trid2->subnqn); 418 if (cmp) { 419 return cmp; 420 } 421 422 return 0; 423 } 424 425 static struct spdk_nvme_ctrlr * 426 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 427 bool ana_reporting, bool multipath) 428 { 429 struct spdk_nvme_ctrlr *ctrlr; 430 uint32_t i; 431 432 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 433 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 434 /* There is a ctrlr whose trid matches. */ 435 return NULL; 436 } 437 } 438 439 ctrlr = calloc(1, sizeof(*ctrlr)); 440 if (ctrlr == NULL) { 441 return NULL; 442 } 443 444 ctrlr->attached = true; 445 ctrlr->adminq.ctrlr = ctrlr; 446 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 447 ctrlr->adminq.is_connected = true; 448 449 if (num_ns != 0) { 450 ctrlr->num_ns = num_ns; 451 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 452 if (ctrlr->ns == NULL) { 453 free(ctrlr); 454 return NULL; 455 } 456 457 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 458 if (ctrlr->nsdata == NULL) { 459 free(ctrlr->ns); 460 free(ctrlr); 461 return NULL; 462 } 463 464 for (i = 0; i < num_ns; i++) { 465 ctrlr->ns[i].id = i + 1; 466 ctrlr->ns[i].ctrlr = ctrlr; 467 ctrlr->ns[i].is_active = true; 468 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 469 ctrlr->nsdata[i].nsze = 1024; 470 ctrlr->nsdata[i].nmic.can_share = multipath; 471 } 472 473 ctrlr->cdata.nn = num_ns; 474 ctrlr->cdata.mnan = num_ns; 475 ctrlr->cdata.nanagrpid = num_ns; 476 } 477 478 ctrlr->cdata.cntlid = ++g_ut_cntlid; 479 ctrlr->cdata.cmic.multi_ctrlr = multipath; 480 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 481 ctrlr->trid = *trid; 482 TAILQ_INIT(&ctrlr->active_io_qpairs); 483 484 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 485 486 return ctrlr; 487 } 488 489 static void 490 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 491 { 492 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 493 494 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 495 free(ctrlr->nsdata); 496 free(ctrlr->ns); 497 free(ctrlr); 498 } 499 500 static int 501 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 502 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 503 { 504 struct ut_nvme_req *req; 505 506 req = calloc(1, sizeof(*req)); 507 if (req == NULL) { 508 return -ENOMEM; 509 } 510 511 req->opc = opc; 512 req->cb_fn = cb_fn; 513 req->cb_arg = cb_arg; 514 515 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 516 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 517 518 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 519 qpair->num_outstanding_reqs++; 520 521 return 0; 522 } 523 524 static struct ut_nvme_req * 525 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 526 { 527 struct ut_nvme_req *req; 528 529 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 530 if (req->cb_arg == cb_arg) { 531 break; 532 } 533 } 534 535 return req; 536 } 537 538 static struct spdk_bdev_io * 539 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 540 struct spdk_io_channel *ch) 541 { 542 struct spdk_bdev_io *bdev_io; 543 544 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 545 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 546 bdev_io->type = type; 547 bdev_io->bdev = &nbdev->disk; 548 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 549 550 return bdev_io; 551 } 552 553 static void 554 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 555 { 556 bdev_io->u.bdev.iovs = &bdev_io->iov; 557 bdev_io->u.bdev.iovcnt = 1; 558 559 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 560 bdev_io->iov.iov_len = 4096; 561 } 562 563 static void 564 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 565 { 566 if (ctrlr->is_failed) { 567 free(ctrlr); 568 return; 569 } 570 571 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 572 if (probe_ctx->cb_ctx) { 573 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 574 } 575 576 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 577 578 if (probe_ctx->attach_cb) { 579 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 580 } 581 } 582 583 int 584 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 585 { 586 struct spdk_nvme_ctrlr *ctrlr, *tmp; 587 588 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 589 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 590 continue; 591 } 592 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 593 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 594 } 595 596 free(probe_ctx); 597 598 return 0; 599 } 600 601 struct spdk_nvme_probe_ctx * 602 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 603 const struct spdk_nvme_ctrlr_opts *opts, 604 spdk_nvme_attach_cb attach_cb) 605 { 606 struct spdk_nvme_probe_ctx *probe_ctx; 607 608 if (trid == NULL) { 609 return NULL; 610 } 611 612 probe_ctx = calloc(1, sizeof(*probe_ctx)); 613 if (probe_ctx == NULL) { 614 return NULL; 615 } 616 617 probe_ctx->trid = *trid; 618 probe_ctx->cb_ctx = (void *)opts; 619 probe_ctx->attach_cb = attach_cb; 620 621 return probe_ctx; 622 } 623 624 int 625 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 626 { 627 if (ctrlr->attached) { 628 ut_detach_ctrlr(ctrlr); 629 } 630 631 return 0; 632 } 633 634 int 635 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 636 { 637 SPDK_CU_ASSERT_FATAL(ctx != NULL); 638 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 639 640 return 0; 641 } 642 643 int 644 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 645 { 646 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 647 } 648 649 void 650 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 651 { 652 memset(opts, 0, opts_size); 653 654 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 655 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 656 } 657 658 const struct spdk_nvme_ctrlr_data * 659 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 660 { 661 return &ctrlr->cdata; 662 } 663 664 uint32_t 665 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 666 { 667 return ctrlr->num_ns; 668 } 669 670 struct spdk_nvme_ns * 671 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 672 { 673 if (nsid < 1 || nsid > ctrlr->num_ns) { 674 return NULL; 675 } 676 677 return &ctrlr->ns[nsid - 1]; 678 } 679 680 bool 681 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 682 { 683 if (nsid < 1 || nsid > ctrlr->num_ns) { 684 return false; 685 } 686 687 return ctrlr->ns[nsid - 1].is_active; 688 } 689 690 union spdk_nvme_csts_register 691 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 692 { 693 union spdk_nvme_csts_register csts; 694 695 csts.raw = 0; 696 697 return csts; 698 } 699 700 union spdk_nvme_vs_register 701 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 702 { 703 union spdk_nvme_vs_register vs; 704 705 vs.raw = 0; 706 707 return vs; 708 } 709 710 struct spdk_nvme_qpair * 711 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 712 const struct spdk_nvme_io_qpair_opts *user_opts, 713 size_t opts_size) 714 { 715 struct spdk_nvme_qpair *qpair; 716 717 qpair = calloc(1, sizeof(*qpair)); 718 if (qpair == NULL) { 719 return NULL; 720 } 721 722 qpair->ctrlr = ctrlr; 723 TAILQ_INIT(&qpair->outstanding_reqs); 724 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 725 726 return qpair; 727 } 728 729 static void 730 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 731 { 732 struct spdk_nvme_poll_group *group = qpair->poll_group; 733 734 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 735 736 qpair->poll_group_tailq_head = &group->connected_qpairs; 737 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 738 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 739 } 740 741 static void 742 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 743 { 744 struct spdk_nvme_poll_group *group = qpair->poll_group; 745 746 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 747 748 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 749 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 750 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 751 } 752 753 int 754 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 755 struct spdk_nvme_qpair *qpair) 756 { 757 if (qpair->is_connected) { 758 return -EISCONN; 759 } 760 761 qpair->is_connected = true; 762 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 763 764 if (qpair->poll_group) { 765 nvme_poll_group_connect_qpair(qpair); 766 } 767 768 return 0; 769 } 770 771 void 772 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 773 { 774 if (!qpair->is_connected) { 775 return; 776 } 777 778 qpair->is_connected = false; 779 780 if (qpair->poll_group != NULL) { 781 nvme_poll_group_disconnect_qpair(qpair); 782 } 783 } 784 785 int 786 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 787 { 788 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 789 790 if (qpair->in_completion_context) { 791 qpair->delete_after_completion_context = true; 792 return 0; 793 } 794 795 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 796 797 if (qpair->poll_group != NULL) { 798 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 799 } 800 801 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 802 803 CU_ASSERT(qpair->num_outstanding_reqs == 0); 804 805 free(qpair); 806 807 return 0; 808 } 809 810 int 811 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 812 { 813 if (ctrlr->fail_reset) { 814 ctrlr->is_failed = true; 815 return -EIO; 816 } 817 818 ctrlr->adminq.is_connected = true; 819 return 0; 820 } 821 822 void 823 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 824 { 825 } 826 827 int 828 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 829 { 830 if (ctrlr->is_removed) { 831 return -ENXIO; 832 } 833 834 ctrlr->adminq.is_connected = false; 835 ctrlr->is_failed = false; 836 837 return 0; 838 } 839 840 void 841 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 842 { 843 ctrlr->is_failed = true; 844 } 845 846 bool 847 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 848 { 849 return ctrlr->is_failed; 850 } 851 852 spdk_nvme_qp_failure_reason 853 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 854 { 855 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 856 } 857 858 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 859 sizeof(uint32_t)) 860 static void 861 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 862 { 863 struct spdk_nvme_ana_page ana_hdr; 864 char _ana_desc[UT_ANA_DESC_SIZE]; 865 struct spdk_nvme_ana_group_descriptor *ana_desc; 866 struct spdk_nvme_ns *ns; 867 uint32_t i; 868 869 memset(&ana_hdr, 0, sizeof(ana_hdr)); 870 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 871 872 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 873 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 874 875 buf += sizeof(ana_hdr); 876 length -= sizeof(ana_hdr); 877 878 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 879 880 for (i = 0; i < ctrlr->num_ns; i++) { 881 ns = &ctrlr->ns[i]; 882 883 if (!ns->is_active) { 884 continue; 885 } 886 887 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 888 889 ana_desc->ana_group_id = ns->id; 890 ana_desc->num_of_nsid = 1; 891 ana_desc->ana_state = ns->ana_state; 892 ana_desc->nsid[0] = ns->id; 893 894 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 895 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 896 897 buf += UT_ANA_DESC_SIZE; 898 length -= UT_ANA_DESC_SIZE; 899 } 900 } 901 902 int 903 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 904 uint8_t log_page, uint32_t nsid, 905 void *payload, uint32_t payload_size, 906 uint64_t offset, 907 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 908 { 909 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 910 SPDK_CU_ASSERT_FATAL(offset == 0); 911 ut_create_ana_log_page(ctrlr, payload, payload_size); 912 } 913 914 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 915 cb_fn, cb_arg); 916 } 917 918 int 919 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 920 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 921 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 922 { 923 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 924 } 925 926 int 927 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 928 void *cmd_cb_arg, 929 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 930 { 931 struct ut_nvme_req *req = NULL, *abort_req; 932 933 if (qpair == NULL) { 934 qpair = &ctrlr->adminq; 935 } 936 937 abort_req = calloc(1, sizeof(*abort_req)); 938 if (abort_req == NULL) { 939 return -ENOMEM; 940 } 941 942 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 943 if (req->cb_arg == cmd_cb_arg) { 944 break; 945 } 946 } 947 948 if (req == NULL) { 949 free(abort_req); 950 return -ENOENT; 951 } 952 953 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 954 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 955 956 abort_req->opc = SPDK_NVME_OPC_ABORT; 957 abort_req->cb_fn = cb_fn; 958 abort_req->cb_arg = cb_arg; 959 960 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 961 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 962 abort_req->cpl.cdw0 = 0; 963 964 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 965 ctrlr->adminq.num_outstanding_reqs++; 966 967 return 0; 968 } 969 970 int32_t 971 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 972 { 973 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 974 } 975 976 uint32_t 977 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 978 { 979 return ns->id; 980 } 981 982 struct spdk_nvme_ctrlr * 983 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 984 { 985 return ns->ctrlr; 986 } 987 988 static inline struct spdk_nvme_ns_data * 989 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 990 { 991 return &ns->ctrlr->nsdata[ns->id - 1]; 992 } 993 994 const struct spdk_nvme_ns_data * 995 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 996 { 997 return _nvme_ns_get_data(ns); 998 } 999 1000 uint64_t 1001 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 1002 { 1003 return _nvme_ns_get_data(ns)->nsze; 1004 } 1005 1006 const struct spdk_uuid * 1007 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 1008 { 1009 return ns->uuid; 1010 } 1011 1012 enum spdk_nvme_csi 1013 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 1014 return ns->csi; 1015 } 1016 1017 int 1018 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1019 void *metadata, uint64_t lba, uint32_t lba_count, 1020 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1021 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1022 { 1023 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1024 } 1025 1026 int 1027 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1028 void *buffer, void *metadata, uint64_t lba, 1029 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1030 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1031 { 1032 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1033 } 1034 1035 int 1036 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1037 uint64_t lba, uint32_t lba_count, 1038 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1039 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1040 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1041 uint16_t apptag_mask, uint16_t apptag) 1042 { 1043 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1044 } 1045 1046 int 1047 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1048 uint64_t lba, uint32_t lba_count, 1049 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1050 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1051 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1052 uint16_t apptag_mask, uint16_t apptag) 1053 { 1054 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1055 } 1056 1057 static bool g_ut_readv_ext_called; 1058 int 1059 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1060 uint64_t lba, uint32_t lba_count, 1061 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1062 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1063 spdk_nvme_req_next_sge_cb next_sge_fn, 1064 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1065 { 1066 g_ut_readv_ext_called = true; 1067 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1068 } 1069 1070 static bool g_ut_read_ext_called; 1071 int 1072 spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1073 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1074 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1075 { 1076 g_ut_read_ext_called = true; 1077 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1078 } 1079 1080 static bool g_ut_writev_ext_called; 1081 int 1082 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1083 uint64_t lba, uint32_t lba_count, 1084 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1085 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1086 spdk_nvme_req_next_sge_cb next_sge_fn, 1087 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1088 { 1089 g_ut_writev_ext_called = true; 1090 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1091 } 1092 1093 static bool g_ut_write_ext_called; 1094 int 1095 spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1096 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1097 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1098 { 1099 g_ut_write_ext_called = true; 1100 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1101 } 1102 1103 int 1104 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1105 uint64_t lba, uint32_t lba_count, 1106 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1107 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1108 spdk_nvme_req_next_sge_cb next_sge_fn, 1109 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1110 { 1111 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1112 } 1113 1114 int 1115 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1116 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1117 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1118 { 1119 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1120 } 1121 1122 int 1123 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1124 uint64_t lba, uint32_t lba_count, 1125 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1126 uint32_t io_flags) 1127 { 1128 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1129 } 1130 1131 int 1132 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1133 const struct spdk_nvme_scc_source_range *ranges, 1134 uint16_t num_ranges, uint64_t dest_lba, 1135 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1136 { 1137 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1138 } 1139 1140 struct spdk_nvme_poll_group * 1141 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1142 { 1143 struct spdk_nvme_poll_group *group; 1144 1145 group = calloc(1, sizeof(*group)); 1146 if (group == NULL) { 1147 return NULL; 1148 } 1149 1150 group->ctx = ctx; 1151 if (table != NULL) { 1152 group->accel_fn_table = *table; 1153 } 1154 TAILQ_INIT(&group->connected_qpairs); 1155 TAILQ_INIT(&group->disconnected_qpairs); 1156 1157 return group; 1158 } 1159 1160 int 1161 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1162 { 1163 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1164 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1165 return -EBUSY; 1166 } 1167 1168 free(group); 1169 1170 return 0; 1171 } 1172 1173 spdk_nvme_qp_failure_reason 1174 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1175 { 1176 return qpair->failure_reason; 1177 } 1178 1179 bool 1180 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair) 1181 { 1182 return qpair->is_connected; 1183 } 1184 1185 int32_t 1186 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1187 uint32_t max_completions) 1188 { 1189 struct ut_nvme_req *req, *tmp; 1190 uint32_t num_completions = 0; 1191 1192 if (!qpair->is_connected) { 1193 return -ENXIO; 1194 } 1195 1196 qpair->in_completion_context = true; 1197 1198 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1199 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1200 qpair->num_outstanding_reqs--; 1201 1202 req->cb_fn(req->cb_arg, &req->cpl); 1203 1204 free(req); 1205 num_completions++; 1206 } 1207 1208 qpair->in_completion_context = false; 1209 if (qpair->delete_after_completion_context) { 1210 spdk_nvme_ctrlr_free_io_qpair(qpair); 1211 } 1212 1213 return num_completions; 1214 } 1215 1216 int64_t 1217 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1218 uint32_t completions_per_qpair, 1219 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1220 { 1221 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1222 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1223 1224 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1225 1226 if (disconnected_qpair_cb == NULL) { 1227 return -EINVAL; 1228 } 1229 1230 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1231 disconnected_qpair_cb(qpair, group->ctx); 1232 } 1233 1234 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1235 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1236 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1237 /* Bump the number of completions so this counts as "busy" */ 1238 num_completions++; 1239 continue; 1240 } 1241 1242 local_completions = spdk_nvme_qpair_process_completions(qpair, 1243 completions_per_qpair); 1244 if (local_completions < 0 && error_reason == 0) { 1245 error_reason = local_completions; 1246 } else { 1247 num_completions += local_completions; 1248 assert(num_completions >= 0); 1249 } 1250 } 1251 1252 return error_reason ? error_reason : num_completions; 1253 } 1254 1255 int 1256 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1257 struct spdk_nvme_qpair *qpair) 1258 { 1259 CU_ASSERT(!qpair->is_connected); 1260 1261 qpair->poll_group = group; 1262 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1263 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1264 1265 return 0; 1266 } 1267 1268 int 1269 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1270 struct spdk_nvme_qpair *qpair) 1271 { 1272 CU_ASSERT(!qpair->is_connected); 1273 1274 if (qpair->poll_group == NULL) { 1275 return -ENOENT; 1276 } 1277 1278 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1279 1280 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1281 1282 qpair->poll_group = NULL; 1283 qpair->poll_group_tailq_head = NULL; 1284 1285 return 0; 1286 } 1287 1288 int 1289 spdk_bdev_register(struct spdk_bdev *bdev) 1290 { 1291 g_ut_registered_bdev = bdev; 1292 1293 return g_ut_register_bdev_status; 1294 } 1295 1296 void 1297 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1298 { 1299 int rc; 1300 1301 rc = bdev->fn_table->destruct(bdev->ctxt); 1302 1303 if (bdev == g_ut_registered_bdev) { 1304 g_ut_registered_bdev = NULL; 1305 } 1306 1307 if (rc <= 0 && cb_fn != NULL) { 1308 cb_fn(cb_arg, rc); 1309 } 1310 } 1311 1312 int 1313 spdk_bdev_open_ext(const char *bdev_name, bool write, 1314 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1315 struct spdk_bdev_desc **desc) 1316 { 1317 if (g_ut_registered_bdev == NULL || 1318 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1319 return -ENODEV; 1320 } 1321 1322 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1323 1324 return 0; 1325 } 1326 1327 struct spdk_bdev * 1328 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1329 { 1330 return (struct spdk_bdev *)desc; 1331 } 1332 1333 int 1334 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1335 { 1336 bdev->blockcnt = size; 1337 1338 return 0; 1339 } 1340 1341 struct spdk_io_channel * 1342 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1343 { 1344 return (struct spdk_io_channel *)bdev_io->internal.ch; 1345 } 1346 1347 struct spdk_thread * 1348 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io) 1349 { 1350 return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io)); 1351 } 1352 1353 void 1354 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1355 { 1356 bdev_io->internal.status = status; 1357 bdev_io->internal.in_submit_request = false; 1358 } 1359 1360 void 1361 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1362 { 1363 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1364 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1365 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1366 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1367 } else { 1368 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1369 } 1370 1371 bdev_io->internal.error.nvme.cdw0 = cdw0; 1372 bdev_io->internal.error.nvme.sct = sct; 1373 bdev_io->internal.error.nvme.sc = sc; 1374 1375 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1376 } 1377 1378 void 1379 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1380 { 1381 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1382 1383 ut_bdev_io_set_buf(bdev_io); 1384 1385 cb(ch, bdev_io, true); 1386 } 1387 1388 static void 1389 test_create_ctrlr(void) 1390 { 1391 struct spdk_nvme_transport_id trid = {}; 1392 struct spdk_nvme_ctrlr ctrlr = {}; 1393 int rc; 1394 1395 ut_init_trid(&trid); 1396 1397 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1398 CU_ASSERT(rc == 0); 1399 1400 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1401 1402 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1403 CU_ASSERT(rc == 0); 1404 1405 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1406 1407 poll_threads(); 1408 spdk_delay_us(1000); 1409 poll_threads(); 1410 1411 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1412 } 1413 1414 static void 1415 ut_check_hotplug_on_reset(void *cb_arg, int rc) 1416 { 1417 bool *detect_remove = cb_arg; 1418 1419 CU_ASSERT(rc != 0); 1420 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1421 1422 *detect_remove = true; 1423 } 1424 1425 static void 1426 test_reset_ctrlr(void) 1427 { 1428 struct spdk_nvme_transport_id trid = {}; 1429 struct spdk_nvme_ctrlr ctrlr = {}; 1430 struct nvme_ctrlr *nvme_ctrlr = NULL; 1431 struct nvme_path_id *curr_trid; 1432 struct spdk_io_channel *ch1, *ch2; 1433 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1434 bool detect_remove; 1435 int rc; 1436 1437 ut_init_trid(&trid); 1438 TAILQ_INIT(&ctrlr.active_io_qpairs); 1439 1440 set_thread(0); 1441 1442 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1443 CU_ASSERT(rc == 0); 1444 1445 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1446 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1447 1448 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1449 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1450 1451 ch1 = spdk_get_io_channel(nvme_ctrlr); 1452 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1453 1454 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1455 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1456 1457 set_thread(1); 1458 1459 ch2 = spdk_get_io_channel(nvme_ctrlr); 1460 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1461 1462 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1463 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1464 1465 /* Reset starts from thread 1. */ 1466 set_thread(1); 1467 1468 /* Case 1: ctrlr is already being destructed. */ 1469 nvme_ctrlr->destruct = true; 1470 1471 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1472 CU_ASSERT(rc == -ENXIO); 1473 1474 /* Case 2: reset is in progress. */ 1475 nvme_ctrlr->destruct = false; 1476 nvme_ctrlr->resetting = true; 1477 1478 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1479 CU_ASSERT(rc == -EBUSY); 1480 1481 /* Case 3: reset completes successfully. */ 1482 nvme_ctrlr->resetting = false; 1483 curr_trid->last_failed_tsc = spdk_get_ticks(); 1484 ctrlr.is_failed = true; 1485 1486 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1487 CU_ASSERT(rc == 0); 1488 CU_ASSERT(nvme_ctrlr->resetting == true); 1489 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1490 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1491 1492 poll_thread_times(0, 3); 1493 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1494 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1495 1496 poll_thread_times(0, 1); 1497 poll_thread_times(1, 1); 1498 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1499 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1500 CU_ASSERT(ctrlr.is_failed == true); 1501 1502 poll_thread_times(1, 1); 1503 poll_thread_times(0, 1); 1504 CU_ASSERT(ctrlr.is_failed == false); 1505 CU_ASSERT(ctrlr.adminq.is_connected == false); 1506 1507 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1508 poll_thread_times(0, 2); 1509 CU_ASSERT(ctrlr.adminq.is_connected == true); 1510 1511 poll_thread_times(0, 1); 1512 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1513 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1514 1515 poll_thread_times(1, 1); 1516 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1517 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1518 CU_ASSERT(nvme_ctrlr->resetting == true); 1519 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1520 1521 poll_thread_times(0, 2); 1522 CU_ASSERT(nvme_ctrlr->resetting == true); 1523 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1524 poll_thread_times(1, 1); 1525 CU_ASSERT(nvme_ctrlr->resetting == true); 1526 poll_thread_times(0, 1); 1527 CU_ASSERT(nvme_ctrlr->resetting == false); 1528 1529 /* Case 4: ctrlr is already removed. */ 1530 ctrlr.is_removed = true; 1531 1532 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1533 CU_ASSERT(rc == 0); 1534 1535 detect_remove = false; 1536 nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset; 1537 nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove; 1538 1539 poll_threads(); 1540 1541 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL); 1542 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL); 1543 CU_ASSERT(detect_remove == true); 1544 1545 ctrlr.is_removed = false; 1546 1547 spdk_put_io_channel(ch2); 1548 1549 set_thread(0); 1550 1551 spdk_put_io_channel(ch1); 1552 1553 poll_threads(); 1554 1555 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1556 CU_ASSERT(rc == 0); 1557 1558 poll_threads(); 1559 spdk_delay_us(1000); 1560 poll_threads(); 1561 1562 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1563 } 1564 1565 static void 1566 test_race_between_reset_and_destruct_ctrlr(void) 1567 { 1568 struct spdk_nvme_transport_id trid = {}; 1569 struct spdk_nvme_ctrlr ctrlr = {}; 1570 struct nvme_ctrlr *nvme_ctrlr; 1571 struct spdk_io_channel *ch1, *ch2; 1572 int rc; 1573 1574 ut_init_trid(&trid); 1575 TAILQ_INIT(&ctrlr.active_io_qpairs); 1576 1577 set_thread(0); 1578 1579 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1580 CU_ASSERT(rc == 0); 1581 1582 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1583 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1584 1585 ch1 = spdk_get_io_channel(nvme_ctrlr); 1586 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1587 1588 set_thread(1); 1589 1590 ch2 = spdk_get_io_channel(nvme_ctrlr); 1591 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1592 1593 /* Reset starts from thread 1. */ 1594 set_thread(1); 1595 1596 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1597 CU_ASSERT(rc == 0); 1598 CU_ASSERT(nvme_ctrlr->resetting == true); 1599 1600 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1601 set_thread(0); 1602 1603 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1604 CU_ASSERT(rc == 0); 1605 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1606 CU_ASSERT(nvme_ctrlr->destruct == true); 1607 CU_ASSERT(nvme_ctrlr->resetting == true); 1608 1609 poll_threads(); 1610 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1611 poll_threads(); 1612 1613 /* Reset completed but ctrlr is not still destructed yet. */ 1614 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1615 CU_ASSERT(nvme_ctrlr->destruct == true); 1616 CU_ASSERT(nvme_ctrlr->resetting == false); 1617 1618 /* New reset request is rejected. */ 1619 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1620 CU_ASSERT(rc == -ENXIO); 1621 1622 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1623 * However there are two channels and destruct is not completed yet. 1624 */ 1625 poll_threads(); 1626 1627 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1628 1629 set_thread(0); 1630 1631 spdk_put_io_channel(ch1); 1632 1633 set_thread(1); 1634 1635 spdk_put_io_channel(ch2); 1636 1637 poll_threads(); 1638 spdk_delay_us(1000); 1639 poll_threads(); 1640 1641 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1642 } 1643 1644 static void 1645 test_failover_ctrlr(void) 1646 { 1647 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1648 struct spdk_nvme_ctrlr ctrlr = {}; 1649 struct nvme_ctrlr *nvme_ctrlr = NULL; 1650 struct nvme_path_id *curr_trid, *next_trid; 1651 struct spdk_io_channel *ch1, *ch2; 1652 int rc; 1653 1654 ut_init_trid(&trid1); 1655 ut_init_trid2(&trid2); 1656 TAILQ_INIT(&ctrlr.active_io_qpairs); 1657 1658 set_thread(0); 1659 1660 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1661 CU_ASSERT(rc == 0); 1662 1663 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1664 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1665 1666 ch1 = spdk_get_io_channel(nvme_ctrlr); 1667 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1668 1669 set_thread(1); 1670 1671 ch2 = spdk_get_io_channel(nvme_ctrlr); 1672 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1673 1674 /* First, test one trid case. */ 1675 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1676 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1677 1678 /* Failover starts from thread 1. */ 1679 set_thread(1); 1680 1681 /* Case 1: ctrlr is already being destructed. */ 1682 nvme_ctrlr->destruct = true; 1683 1684 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1685 CU_ASSERT(rc == -ENXIO); 1686 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1687 1688 /* Case 2: reset is in progress. */ 1689 nvme_ctrlr->destruct = false; 1690 nvme_ctrlr->resetting = true; 1691 1692 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1693 CU_ASSERT(rc == -EINPROGRESS); 1694 1695 /* Case 3: reset completes successfully. */ 1696 nvme_ctrlr->resetting = false; 1697 1698 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1699 CU_ASSERT(rc == 0); 1700 1701 CU_ASSERT(nvme_ctrlr->resetting == true); 1702 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1703 1704 poll_threads(); 1705 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1706 poll_threads(); 1707 1708 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1709 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1710 1711 CU_ASSERT(nvme_ctrlr->resetting == false); 1712 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1713 1714 set_thread(0); 1715 1716 /* Second, test two trids case. */ 1717 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1718 CU_ASSERT(rc == 0); 1719 1720 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1721 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1722 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1723 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1724 1725 /* Failover starts from thread 1. */ 1726 set_thread(1); 1727 1728 /* Case 4: reset is in progress. */ 1729 nvme_ctrlr->resetting = true; 1730 1731 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1732 CU_ASSERT(rc == -EINPROGRESS); 1733 1734 /* Case 5: failover completes successfully. */ 1735 nvme_ctrlr->resetting = false; 1736 1737 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1738 CU_ASSERT(rc == 0); 1739 1740 CU_ASSERT(nvme_ctrlr->resetting == true); 1741 1742 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1743 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1744 CU_ASSERT(next_trid != curr_trid); 1745 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1746 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1747 1748 poll_threads(); 1749 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1750 poll_threads(); 1751 1752 CU_ASSERT(nvme_ctrlr->resetting == false); 1753 1754 spdk_put_io_channel(ch2); 1755 1756 set_thread(0); 1757 1758 spdk_put_io_channel(ch1); 1759 1760 poll_threads(); 1761 1762 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1763 CU_ASSERT(rc == 0); 1764 1765 poll_threads(); 1766 spdk_delay_us(1000); 1767 poll_threads(); 1768 1769 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1770 } 1771 1772 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1773 * 1774 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1775 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1776 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1777 * have been active, i.e., the head of the list until the failover completed. 1778 * However trid3 was inserted to the head of the list by mistake. 1779 * 1780 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1781 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1782 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1783 * may be executed repeatedly before failover is executed. Hence this bug is real. 1784 * 1785 * The following test verifies the fix. 1786 */ 1787 static void 1788 test_race_between_failover_and_add_secondary_trid(void) 1789 { 1790 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1791 struct spdk_nvme_ctrlr ctrlr = {}; 1792 struct nvme_ctrlr *nvme_ctrlr = NULL; 1793 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1794 struct spdk_io_channel *ch1, *ch2; 1795 int rc; 1796 1797 ut_init_trid(&trid1); 1798 ut_init_trid2(&trid2); 1799 ut_init_trid3(&trid3); 1800 TAILQ_INIT(&ctrlr.active_io_qpairs); 1801 1802 set_thread(0); 1803 1804 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1805 CU_ASSERT(rc == 0); 1806 1807 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1808 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1809 1810 ch1 = spdk_get_io_channel(nvme_ctrlr); 1811 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1812 1813 set_thread(1); 1814 1815 ch2 = spdk_get_io_channel(nvme_ctrlr); 1816 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1817 1818 set_thread(0); 1819 1820 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1821 CU_ASSERT(rc == 0); 1822 1823 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1824 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1825 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1826 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1827 path_id2 = TAILQ_NEXT(path_id1, link); 1828 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1829 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1830 1831 ctrlr.fail_reset = true; 1832 1833 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1834 CU_ASSERT(rc == 0); 1835 1836 poll_threads(); 1837 1838 CU_ASSERT(path_id1->last_failed_tsc != 0); 1839 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1840 1841 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1842 CU_ASSERT(rc == 0); 1843 1844 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1845 CU_ASSERT(rc == 0); 1846 1847 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1848 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1849 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1850 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1851 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1852 path_id3 = TAILQ_NEXT(path_id2, link); 1853 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1854 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1855 1856 poll_threads(); 1857 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1858 poll_threads(); 1859 1860 spdk_put_io_channel(ch1); 1861 1862 set_thread(1); 1863 1864 spdk_put_io_channel(ch2); 1865 1866 poll_threads(); 1867 1868 set_thread(0); 1869 1870 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1871 CU_ASSERT(rc == 0); 1872 1873 poll_threads(); 1874 spdk_delay_us(1000); 1875 poll_threads(); 1876 1877 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1878 } 1879 1880 static void 1881 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1882 { 1883 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1884 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1885 } 1886 1887 static void 1888 test_pending_reset(void) 1889 { 1890 struct spdk_nvme_transport_id trid = {}; 1891 struct spdk_nvme_ctrlr *ctrlr; 1892 struct nvme_ctrlr *nvme_ctrlr = NULL; 1893 const int STRING_SIZE = 32; 1894 const char *attached_names[STRING_SIZE]; 1895 struct nvme_bdev *bdev; 1896 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1897 struct spdk_io_channel *ch1, *ch2; 1898 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1899 struct nvme_io_path *io_path1, *io_path2; 1900 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1901 int rc; 1902 1903 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1904 ut_init_trid(&trid); 1905 1906 set_thread(0); 1907 1908 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1909 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1910 1911 g_ut_attach_ctrlr_status = 0; 1912 g_ut_attach_bdev_count = 1; 1913 1914 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1915 attach_ctrlr_done, NULL, NULL, NULL, false); 1916 CU_ASSERT(rc == 0); 1917 1918 spdk_delay_us(1000); 1919 poll_threads(); 1920 1921 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1922 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1923 1924 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1925 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1926 1927 ch1 = spdk_get_io_channel(bdev); 1928 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1929 1930 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1931 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1932 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1933 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1934 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1935 1936 set_thread(1); 1937 1938 ch2 = spdk_get_io_channel(bdev); 1939 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1940 1941 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1942 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1943 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1944 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1945 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1946 1947 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1948 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1949 1950 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1951 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1952 1953 /* The first reset request is submitted on thread 1, and the second reset request 1954 * is submitted on thread 0 while processing the first request. 1955 */ 1956 bdev_nvme_submit_request(ch2, first_bdev_io); 1957 CU_ASSERT(nvme_ctrlr->resetting == true); 1958 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1959 1960 set_thread(0); 1961 1962 bdev_nvme_submit_request(ch1, second_bdev_io); 1963 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1964 1965 poll_threads(); 1966 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1967 poll_threads(); 1968 1969 CU_ASSERT(nvme_ctrlr->resetting == false); 1970 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1971 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1972 1973 /* The first reset request is submitted on thread 1, and the second reset request 1974 * is submitted on thread 0 while processing the first request. 1975 * 1976 * The difference from the above scenario is that the controller is removed while 1977 * processing the first request. Hence both reset requests should fail. 1978 */ 1979 set_thread(1); 1980 1981 bdev_nvme_submit_request(ch2, first_bdev_io); 1982 CU_ASSERT(nvme_ctrlr->resetting == true); 1983 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1984 1985 set_thread(0); 1986 1987 bdev_nvme_submit_request(ch1, second_bdev_io); 1988 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1989 1990 ctrlr->fail_reset = true; 1991 1992 poll_threads(); 1993 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1994 poll_threads(); 1995 1996 CU_ASSERT(nvme_ctrlr->resetting == false); 1997 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1998 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1999 2000 spdk_put_io_channel(ch1); 2001 2002 set_thread(1); 2003 2004 spdk_put_io_channel(ch2); 2005 2006 poll_threads(); 2007 2008 set_thread(0); 2009 2010 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2011 CU_ASSERT(rc == 0); 2012 2013 poll_threads(); 2014 spdk_delay_us(1000); 2015 poll_threads(); 2016 2017 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2018 2019 free(first_bdev_io); 2020 free(second_bdev_io); 2021 } 2022 2023 static void 2024 test_attach_ctrlr(void) 2025 { 2026 struct spdk_nvme_transport_id trid = {}; 2027 struct spdk_nvme_ctrlr *ctrlr; 2028 struct nvme_ctrlr *nvme_ctrlr; 2029 const int STRING_SIZE = 32; 2030 const char *attached_names[STRING_SIZE]; 2031 struct nvme_bdev *nbdev; 2032 int rc; 2033 2034 set_thread(0); 2035 2036 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2037 ut_init_trid(&trid); 2038 2039 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2040 * by probe polling. 2041 */ 2042 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2043 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2044 2045 ctrlr->is_failed = true; 2046 g_ut_attach_ctrlr_status = -EIO; 2047 g_ut_attach_bdev_count = 0; 2048 2049 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2050 attach_ctrlr_done, NULL, NULL, NULL, false); 2051 CU_ASSERT(rc == 0); 2052 2053 spdk_delay_us(1000); 2054 poll_threads(); 2055 2056 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2057 2058 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2059 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2060 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2061 2062 g_ut_attach_ctrlr_status = 0; 2063 2064 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2065 attach_ctrlr_done, NULL, NULL, NULL, false); 2066 CU_ASSERT(rc == 0); 2067 2068 spdk_delay_us(1000); 2069 poll_threads(); 2070 2071 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2072 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2073 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2074 2075 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2076 CU_ASSERT(rc == 0); 2077 2078 poll_threads(); 2079 spdk_delay_us(1000); 2080 poll_threads(); 2081 2082 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2083 2084 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2085 * one nvme_bdev is created. 2086 */ 2087 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2088 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2089 2090 g_ut_attach_bdev_count = 1; 2091 2092 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2093 attach_ctrlr_done, NULL, NULL, NULL, false); 2094 CU_ASSERT(rc == 0); 2095 2096 spdk_delay_us(1000); 2097 poll_threads(); 2098 2099 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2100 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2101 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2102 2103 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2104 attached_names[0] = NULL; 2105 2106 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2107 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2108 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2109 2110 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2111 CU_ASSERT(rc == 0); 2112 2113 poll_threads(); 2114 spdk_delay_us(1000); 2115 poll_threads(); 2116 2117 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2118 2119 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2120 * created because creating one nvme_bdev failed. 2121 */ 2122 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2123 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2124 2125 g_ut_register_bdev_status = -EINVAL; 2126 g_ut_attach_bdev_count = 0; 2127 2128 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2129 attach_ctrlr_done, NULL, NULL, NULL, false); 2130 CU_ASSERT(rc == 0); 2131 2132 spdk_delay_us(1000); 2133 poll_threads(); 2134 2135 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2136 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2137 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2138 2139 CU_ASSERT(attached_names[0] == NULL); 2140 2141 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2142 CU_ASSERT(rc == 0); 2143 2144 poll_threads(); 2145 spdk_delay_us(1000); 2146 poll_threads(); 2147 2148 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2149 2150 g_ut_register_bdev_status = 0; 2151 } 2152 2153 static void 2154 test_aer_cb(void) 2155 { 2156 struct spdk_nvme_transport_id trid = {}; 2157 struct spdk_nvme_ctrlr *ctrlr; 2158 struct nvme_ctrlr *nvme_ctrlr; 2159 struct nvme_bdev *bdev; 2160 const int STRING_SIZE = 32; 2161 const char *attached_names[STRING_SIZE]; 2162 union spdk_nvme_async_event_completion event = {}; 2163 struct spdk_nvme_cpl cpl = {}; 2164 int rc; 2165 2166 set_thread(0); 2167 2168 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2169 ut_init_trid(&trid); 2170 2171 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2172 * namespaces are populated. 2173 */ 2174 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2175 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2176 2177 ctrlr->ns[0].is_active = false; 2178 2179 g_ut_attach_ctrlr_status = 0; 2180 g_ut_attach_bdev_count = 3; 2181 2182 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2183 attach_ctrlr_done, NULL, NULL, NULL, false); 2184 CU_ASSERT(rc == 0); 2185 2186 spdk_delay_us(1000); 2187 poll_threads(); 2188 2189 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2190 poll_threads(); 2191 2192 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2193 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2194 2195 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2196 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2197 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2198 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2199 2200 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2201 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2202 CU_ASSERT(bdev->disk.blockcnt == 1024); 2203 2204 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2205 * change the size of the 4th namespace. 2206 */ 2207 ctrlr->ns[0].is_active = true; 2208 ctrlr->ns[2].is_active = false; 2209 ctrlr->nsdata[3].nsze = 2048; 2210 2211 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2212 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2213 cpl.cdw0 = event.raw; 2214 2215 aer_cb(nvme_ctrlr, &cpl); 2216 2217 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2218 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2219 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2220 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2221 CU_ASSERT(bdev->disk.blockcnt == 2048); 2222 2223 /* Change ANA state of active namespaces. */ 2224 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2225 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2226 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2227 2228 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2229 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2230 cpl.cdw0 = event.raw; 2231 2232 aer_cb(nvme_ctrlr, &cpl); 2233 2234 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2235 poll_threads(); 2236 2237 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2238 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2239 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2240 2241 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2242 CU_ASSERT(rc == 0); 2243 2244 poll_threads(); 2245 spdk_delay_us(1000); 2246 poll_threads(); 2247 2248 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2249 } 2250 2251 static void 2252 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2253 enum spdk_bdev_io_type io_type) 2254 { 2255 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2256 struct nvme_io_path *io_path; 2257 struct spdk_nvme_qpair *qpair; 2258 2259 io_path = bdev_nvme_find_io_path(nbdev_ch); 2260 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2261 qpair = io_path->qpair->qpair; 2262 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2263 2264 bdev_io->type = io_type; 2265 bdev_io->internal.in_submit_request = true; 2266 2267 bdev_nvme_submit_request(ch, bdev_io); 2268 2269 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2270 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2271 2272 poll_threads(); 2273 2274 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2275 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2276 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2277 } 2278 2279 static void 2280 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2281 enum spdk_bdev_io_type io_type) 2282 { 2283 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2284 struct nvme_io_path *io_path; 2285 struct spdk_nvme_qpair *qpair; 2286 2287 io_path = bdev_nvme_find_io_path(nbdev_ch); 2288 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2289 qpair = io_path->qpair->qpair; 2290 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2291 2292 bdev_io->type = io_type; 2293 bdev_io->internal.in_submit_request = true; 2294 2295 bdev_nvme_submit_request(ch, bdev_io); 2296 2297 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2298 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2299 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2300 } 2301 2302 static void 2303 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2304 { 2305 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2306 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2307 struct ut_nvme_req *req; 2308 struct nvme_io_path *io_path; 2309 struct spdk_nvme_qpair *qpair; 2310 2311 io_path = bdev_nvme_find_io_path(nbdev_ch); 2312 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2313 qpair = io_path->qpair->qpair; 2314 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2315 2316 /* Only compare and write now. */ 2317 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2318 bdev_io->internal.in_submit_request = true; 2319 2320 bdev_nvme_submit_request(ch, bdev_io); 2321 2322 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2323 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2324 CU_ASSERT(bio->first_fused_submitted == true); 2325 2326 /* First outstanding request is compare operation. */ 2327 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2328 SPDK_CU_ASSERT_FATAL(req != NULL); 2329 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2330 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2331 2332 poll_threads(); 2333 2334 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2335 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2336 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2337 } 2338 2339 static void 2340 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2341 struct spdk_nvme_ctrlr *ctrlr) 2342 { 2343 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2344 bdev_io->internal.in_submit_request = true; 2345 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2346 2347 bdev_nvme_submit_request(ch, bdev_io); 2348 2349 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2350 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2351 2352 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2353 poll_thread_times(1, 1); 2354 2355 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2356 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2357 2358 poll_thread_times(0, 1); 2359 2360 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2361 } 2362 2363 static void 2364 test_submit_nvme_cmd(void) 2365 { 2366 struct spdk_nvme_transport_id trid = {}; 2367 struct spdk_nvme_ctrlr *ctrlr; 2368 struct nvme_ctrlr *nvme_ctrlr; 2369 const int STRING_SIZE = 32; 2370 const char *attached_names[STRING_SIZE]; 2371 struct nvme_bdev *bdev; 2372 struct spdk_bdev_io *bdev_io; 2373 struct spdk_io_channel *ch; 2374 int rc; 2375 2376 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2377 ut_init_trid(&trid); 2378 2379 set_thread(1); 2380 2381 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2382 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2383 2384 g_ut_attach_ctrlr_status = 0; 2385 g_ut_attach_bdev_count = 1; 2386 2387 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2388 attach_ctrlr_done, NULL, NULL, NULL, false); 2389 CU_ASSERT(rc == 0); 2390 2391 spdk_delay_us(1000); 2392 poll_threads(); 2393 2394 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2395 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2396 2397 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2398 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2399 2400 set_thread(0); 2401 2402 ch = spdk_get_io_channel(bdev); 2403 SPDK_CU_ASSERT_FATAL(ch != NULL); 2404 2405 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2406 2407 bdev_io->u.bdev.iovs = NULL; 2408 2409 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2410 2411 ut_bdev_io_set_buf(bdev_io); 2412 2413 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2414 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2415 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2416 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2417 2418 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2419 2420 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2421 2422 /* Verify that ext NVME API is called when data is described by memory domain */ 2423 g_ut_read_ext_called = false; 2424 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2425 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2426 CU_ASSERT(g_ut_read_ext_called == true); 2427 g_ut_read_ext_called = false; 2428 bdev_io->u.bdev.memory_domain = NULL; 2429 2430 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2431 2432 free(bdev_io); 2433 2434 spdk_put_io_channel(ch); 2435 2436 poll_threads(); 2437 2438 set_thread(1); 2439 2440 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2441 CU_ASSERT(rc == 0); 2442 2443 poll_threads(); 2444 spdk_delay_us(1000); 2445 poll_threads(); 2446 2447 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2448 } 2449 2450 static void 2451 test_add_remove_trid(void) 2452 { 2453 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2454 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2455 struct nvme_ctrlr *nvme_ctrlr = NULL; 2456 const int STRING_SIZE = 32; 2457 const char *attached_names[STRING_SIZE]; 2458 struct nvme_path_id *ctrid; 2459 int rc; 2460 2461 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2462 ut_init_trid(&path1.trid); 2463 ut_init_trid2(&path2.trid); 2464 ut_init_trid3(&path3.trid); 2465 2466 set_thread(0); 2467 2468 g_ut_attach_ctrlr_status = 0; 2469 g_ut_attach_bdev_count = 0; 2470 2471 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2472 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2473 2474 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2475 attach_ctrlr_done, NULL, NULL, NULL, false); 2476 CU_ASSERT(rc == 0); 2477 2478 spdk_delay_us(1000); 2479 poll_threads(); 2480 2481 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2482 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2483 2484 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2485 2486 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2487 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2488 2489 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2490 attach_ctrlr_done, NULL, NULL, NULL, false); 2491 CU_ASSERT(rc == 0); 2492 2493 spdk_delay_us(1000); 2494 poll_threads(); 2495 2496 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2497 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2498 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2499 break; 2500 } 2501 } 2502 CU_ASSERT(ctrid != NULL); 2503 2504 /* trid3 is not in the registered list. */ 2505 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2506 CU_ASSERT(rc == -ENXIO); 2507 2508 /* trid2 is not used, and simply removed. */ 2509 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2510 CU_ASSERT(rc == 0); 2511 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2512 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2513 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2514 } 2515 2516 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2517 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2518 2519 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2520 attach_ctrlr_done, NULL, NULL, NULL, false); 2521 CU_ASSERT(rc == 0); 2522 2523 spdk_delay_us(1000); 2524 poll_threads(); 2525 2526 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2527 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2528 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2529 break; 2530 } 2531 } 2532 CU_ASSERT(ctrid != NULL); 2533 2534 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2535 * If we add path2 again, path2 should be inserted between path1 and path3. 2536 * Then, we remove path2. It is not used, and simply removed. 2537 */ 2538 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2539 2540 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2541 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2542 2543 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2544 attach_ctrlr_done, NULL, NULL, NULL, false); 2545 CU_ASSERT(rc == 0); 2546 2547 spdk_delay_us(1000); 2548 poll_threads(); 2549 2550 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2551 2552 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2553 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2554 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2555 2556 ctrid = TAILQ_NEXT(ctrid, link); 2557 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2558 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2559 2560 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2561 CU_ASSERT(rc == 0); 2562 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2563 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2564 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2565 } 2566 2567 /* path1 is currently used and path3 is an alternative path. 2568 * If we remove path1, path is changed to path3. 2569 */ 2570 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2571 CU_ASSERT(rc == 0); 2572 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2573 CU_ASSERT(nvme_ctrlr->resetting == true); 2574 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2575 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2576 } 2577 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2578 2579 poll_threads(); 2580 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2581 poll_threads(); 2582 2583 CU_ASSERT(nvme_ctrlr->resetting == false); 2584 2585 /* path3 is the current and only path. If we remove path3, the corresponding 2586 * nvme_ctrlr is removed. 2587 */ 2588 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2589 CU_ASSERT(rc == 0); 2590 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2591 2592 poll_threads(); 2593 spdk_delay_us(1000); 2594 poll_threads(); 2595 2596 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2597 2598 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2599 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2600 2601 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2602 attach_ctrlr_done, NULL, NULL, NULL, false); 2603 CU_ASSERT(rc == 0); 2604 2605 spdk_delay_us(1000); 2606 poll_threads(); 2607 2608 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2609 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2610 2611 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2612 2613 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2614 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2615 2616 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2617 attach_ctrlr_done, NULL, NULL, NULL, false); 2618 CU_ASSERT(rc == 0); 2619 2620 spdk_delay_us(1000); 2621 poll_threads(); 2622 2623 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2624 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2625 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2626 break; 2627 } 2628 } 2629 CU_ASSERT(ctrid != NULL); 2630 2631 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2632 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2633 CU_ASSERT(rc == 0); 2634 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2635 2636 poll_threads(); 2637 spdk_delay_us(1000); 2638 poll_threads(); 2639 2640 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2641 } 2642 2643 static void 2644 test_abort(void) 2645 { 2646 struct spdk_nvme_transport_id trid = {}; 2647 struct nvme_ctrlr_opts opts = {}; 2648 struct spdk_nvme_ctrlr *ctrlr; 2649 struct nvme_ctrlr *nvme_ctrlr; 2650 const int STRING_SIZE = 32; 2651 const char *attached_names[STRING_SIZE]; 2652 struct nvme_bdev *bdev; 2653 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2654 struct spdk_io_channel *ch1, *ch2; 2655 struct nvme_bdev_channel *nbdev_ch1; 2656 struct nvme_io_path *io_path1; 2657 struct nvme_qpair *nvme_qpair1; 2658 int rc; 2659 2660 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2661 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2662 * are submitted on thread 1. Both should succeed. 2663 */ 2664 2665 ut_init_trid(&trid); 2666 2667 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2668 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2669 2670 g_ut_attach_ctrlr_status = 0; 2671 g_ut_attach_bdev_count = 1; 2672 2673 set_thread(1); 2674 2675 opts.ctrlr_loss_timeout_sec = -1; 2676 opts.reconnect_delay_sec = 1; 2677 2678 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2679 attach_ctrlr_done, NULL, NULL, &opts, false); 2680 CU_ASSERT(rc == 0); 2681 2682 spdk_delay_us(1000); 2683 poll_threads(); 2684 2685 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2686 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2687 2688 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2689 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2690 2691 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2692 ut_bdev_io_set_buf(write_io); 2693 2694 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2695 ut_bdev_io_set_buf(fuse_io); 2696 2697 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2698 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2699 2700 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2701 2702 set_thread(0); 2703 2704 ch1 = spdk_get_io_channel(bdev); 2705 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2706 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2707 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2708 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2709 nvme_qpair1 = io_path1->qpair; 2710 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2711 2712 set_thread(1); 2713 2714 ch2 = spdk_get_io_channel(bdev); 2715 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2716 2717 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2718 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2719 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2720 2721 /* Aborting the already completed request should fail. */ 2722 write_io->internal.in_submit_request = true; 2723 bdev_nvme_submit_request(ch1, write_io); 2724 poll_threads(); 2725 2726 CU_ASSERT(write_io->internal.in_submit_request == false); 2727 2728 abort_io->u.abort.bio_to_abort = write_io; 2729 abort_io->internal.in_submit_request = true; 2730 2731 bdev_nvme_submit_request(ch1, abort_io); 2732 2733 poll_threads(); 2734 2735 CU_ASSERT(abort_io->internal.in_submit_request == false); 2736 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2737 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2738 2739 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2740 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2741 2742 admin_io->internal.in_submit_request = true; 2743 bdev_nvme_submit_request(ch1, admin_io); 2744 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2745 poll_threads(); 2746 2747 CU_ASSERT(admin_io->internal.in_submit_request == false); 2748 2749 abort_io->u.abort.bio_to_abort = admin_io; 2750 abort_io->internal.in_submit_request = true; 2751 2752 bdev_nvme_submit_request(ch2, abort_io); 2753 2754 poll_threads(); 2755 2756 CU_ASSERT(abort_io->internal.in_submit_request == false); 2757 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2758 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2759 2760 /* Aborting the write request should succeed. */ 2761 write_io->internal.in_submit_request = true; 2762 bdev_nvme_submit_request(ch1, write_io); 2763 2764 CU_ASSERT(write_io->internal.in_submit_request == true); 2765 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2766 2767 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2768 abort_io->u.abort.bio_to_abort = write_io; 2769 abort_io->internal.in_submit_request = true; 2770 2771 bdev_nvme_submit_request(ch1, abort_io); 2772 2773 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2774 poll_threads(); 2775 2776 CU_ASSERT(abort_io->internal.in_submit_request == false); 2777 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2778 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2779 CU_ASSERT(write_io->internal.in_submit_request == false); 2780 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2781 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2782 2783 /* Aborting the fuse request should succeed. */ 2784 fuse_io->internal.in_submit_request = true; 2785 bdev_nvme_submit_request(ch1, fuse_io); 2786 2787 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2788 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2789 2790 abort_io->u.abort.bio_to_abort = fuse_io; 2791 abort_io->internal.in_submit_request = true; 2792 2793 bdev_nvme_submit_request(ch1, abort_io); 2794 2795 spdk_delay_us(10000); 2796 poll_threads(); 2797 2798 CU_ASSERT(abort_io->internal.in_submit_request == false); 2799 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2800 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2801 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2802 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2803 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2804 2805 /* Aborting the admin request should succeed. */ 2806 admin_io->internal.in_submit_request = true; 2807 bdev_nvme_submit_request(ch1, admin_io); 2808 2809 CU_ASSERT(admin_io->internal.in_submit_request == true); 2810 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2811 2812 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2813 abort_io->u.abort.bio_to_abort = admin_io; 2814 abort_io->internal.in_submit_request = true; 2815 2816 bdev_nvme_submit_request(ch2, abort_io); 2817 2818 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2819 poll_threads(); 2820 2821 CU_ASSERT(abort_io->internal.in_submit_request == false); 2822 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2823 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2824 CU_ASSERT(admin_io->internal.in_submit_request == false); 2825 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2826 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2827 2828 set_thread(0); 2829 2830 /* If qpair is disconnected, it is freed and then reconnected via resetting 2831 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2832 * while resetting the nvme_ctrlr. 2833 */ 2834 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2835 2836 poll_thread_times(0, 3); 2837 2838 CU_ASSERT(nvme_qpair1->qpair == NULL); 2839 CU_ASSERT(nvme_ctrlr->resetting == true); 2840 2841 write_io->internal.in_submit_request = true; 2842 2843 bdev_nvme_submit_request(ch1, write_io); 2844 2845 CU_ASSERT(write_io->internal.in_submit_request == true); 2846 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2847 2848 /* Aborting the queued write request should succeed immediately. */ 2849 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2850 abort_io->u.abort.bio_to_abort = write_io; 2851 abort_io->internal.in_submit_request = true; 2852 2853 bdev_nvme_submit_request(ch1, abort_io); 2854 2855 CU_ASSERT(abort_io->internal.in_submit_request == false); 2856 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2857 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2858 CU_ASSERT(write_io->internal.in_submit_request == false); 2859 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2860 2861 poll_threads(); 2862 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2863 poll_threads(); 2864 2865 spdk_put_io_channel(ch1); 2866 2867 set_thread(1); 2868 2869 spdk_put_io_channel(ch2); 2870 2871 poll_threads(); 2872 2873 free(write_io); 2874 free(fuse_io); 2875 free(admin_io); 2876 free(abort_io); 2877 2878 set_thread(1); 2879 2880 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2881 CU_ASSERT(rc == 0); 2882 2883 poll_threads(); 2884 spdk_delay_us(1000); 2885 poll_threads(); 2886 2887 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2888 } 2889 2890 static void 2891 test_get_io_qpair(void) 2892 { 2893 struct spdk_nvme_transport_id trid = {}; 2894 struct spdk_nvme_ctrlr ctrlr = {}; 2895 struct nvme_ctrlr *nvme_ctrlr = NULL; 2896 struct spdk_io_channel *ch; 2897 struct nvme_ctrlr_channel *ctrlr_ch; 2898 struct spdk_nvme_qpair *qpair; 2899 int rc; 2900 2901 ut_init_trid(&trid); 2902 TAILQ_INIT(&ctrlr.active_io_qpairs); 2903 2904 set_thread(0); 2905 2906 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2907 CU_ASSERT(rc == 0); 2908 2909 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2910 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2911 2912 ch = spdk_get_io_channel(nvme_ctrlr); 2913 SPDK_CU_ASSERT_FATAL(ch != NULL); 2914 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2915 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2916 2917 qpair = bdev_nvme_get_io_qpair(ch); 2918 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2919 2920 spdk_put_io_channel(ch); 2921 2922 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2923 CU_ASSERT(rc == 0); 2924 2925 poll_threads(); 2926 spdk_delay_us(1000); 2927 poll_threads(); 2928 2929 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2930 } 2931 2932 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2933 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2934 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2935 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2936 */ 2937 static void 2938 test_bdev_unregister(void) 2939 { 2940 struct spdk_nvme_transport_id trid = {}; 2941 struct spdk_nvme_ctrlr *ctrlr; 2942 struct nvme_ctrlr *nvme_ctrlr; 2943 struct nvme_ns *nvme_ns1, *nvme_ns2; 2944 const int STRING_SIZE = 32; 2945 const char *attached_names[STRING_SIZE]; 2946 struct nvme_bdev *bdev1, *bdev2; 2947 int rc; 2948 2949 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2950 ut_init_trid(&trid); 2951 2952 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2953 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2954 2955 g_ut_attach_ctrlr_status = 0; 2956 g_ut_attach_bdev_count = 2; 2957 2958 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2959 attach_ctrlr_done, NULL, NULL, NULL, false); 2960 CU_ASSERT(rc == 0); 2961 2962 spdk_delay_us(1000); 2963 poll_threads(); 2964 2965 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2966 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2967 2968 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2969 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2970 2971 bdev1 = nvme_ns1->bdev; 2972 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2973 2974 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2975 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2976 2977 bdev2 = nvme_ns2->bdev; 2978 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2979 2980 bdev_nvme_destruct(&bdev1->disk); 2981 bdev_nvme_destruct(&bdev2->disk); 2982 2983 poll_threads(); 2984 2985 CU_ASSERT(nvme_ns1->bdev == NULL); 2986 CU_ASSERT(nvme_ns2->bdev == NULL); 2987 2988 nvme_ctrlr->destruct = true; 2989 _nvme_ctrlr_destruct(nvme_ctrlr); 2990 2991 poll_threads(); 2992 spdk_delay_us(1000); 2993 poll_threads(); 2994 2995 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2996 } 2997 2998 static void 2999 test_compare_ns(void) 3000 { 3001 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 3002 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 3003 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 3004 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3005 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3006 3007 /* No IDs are defined. */ 3008 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3009 3010 /* Only EUI64 are defined and not matched. */ 3011 nsdata1.eui64 = 0xABCDEF0123456789; 3012 nsdata2.eui64 = 0xBBCDEF0123456789; 3013 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3014 3015 /* Only EUI64 are defined and matched. */ 3016 nsdata2.eui64 = 0xABCDEF0123456789; 3017 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3018 3019 /* Only NGUID are defined and not matched. */ 3020 nsdata1.eui64 = 0x0; 3021 nsdata2.eui64 = 0x0; 3022 nsdata1.nguid[0] = 0x12; 3023 nsdata2.nguid[0] = 0x10; 3024 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3025 3026 /* Only NGUID are defined and matched. */ 3027 nsdata2.nguid[0] = 0x12; 3028 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3029 3030 /* Only UUID are defined and not matched. */ 3031 nsdata1.nguid[0] = 0x0; 3032 nsdata2.nguid[0] = 0x0; 3033 ns1.uuid = &uuid1; 3034 ns2.uuid = &uuid2; 3035 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3036 3037 /* Only one UUID is defined. */ 3038 ns1.uuid = NULL; 3039 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3040 3041 /* Only UUID are defined and matched. */ 3042 ns1.uuid = &uuid2; 3043 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3044 3045 /* All EUI64, NGUID, and UUID are defined and matched. */ 3046 nsdata1.eui64 = 0x123456789ABCDEF; 3047 nsdata2.eui64 = 0x123456789ABCDEF; 3048 nsdata1.nguid[15] = 0x34; 3049 nsdata2.nguid[15] = 0x34; 3050 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3051 3052 /* CSI are not matched. */ 3053 ns1.csi = SPDK_NVME_CSI_ZNS; 3054 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3055 } 3056 3057 static void 3058 test_init_ana_log_page(void) 3059 { 3060 struct spdk_nvme_transport_id trid = {}; 3061 struct spdk_nvme_ctrlr *ctrlr; 3062 struct nvme_ctrlr *nvme_ctrlr; 3063 const int STRING_SIZE = 32; 3064 const char *attached_names[STRING_SIZE]; 3065 int rc; 3066 3067 set_thread(0); 3068 3069 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3070 ut_init_trid(&trid); 3071 3072 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3073 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3074 3075 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3076 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3077 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3078 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3079 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3080 3081 g_ut_attach_ctrlr_status = 0; 3082 g_ut_attach_bdev_count = 5; 3083 3084 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3085 attach_ctrlr_done, NULL, NULL, NULL, false); 3086 CU_ASSERT(rc == 0); 3087 3088 spdk_delay_us(1000); 3089 poll_threads(); 3090 3091 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3092 poll_threads(); 3093 3094 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3095 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3096 3097 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3098 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3099 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3100 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3101 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3102 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3103 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3104 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3105 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3106 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3107 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3108 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3109 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3110 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3111 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3112 3113 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3114 CU_ASSERT(rc == 0); 3115 3116 poll_threads(); 3117 spdk_delay_us(1000); 3118 poll_threads(); 3119 3120 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3121 } 3122 3123 static void 3124 init_accel(void) 3125 { 3126 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3127 sizeof(int), "accel_p"); 3128 } 3129 3130 static void 3131 fini_accel(void) 3132 { 3133 spdk_io_device_unregister(g_accel_p, NULL); 3134 } 3135 3136 static void 3137 test_get_memory_domains(void) 3138 { 3139 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3140 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3141 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3142 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3143 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3144 struct spdk_memory_domain *domains[4] = {}; 3145 int rc = 0; 3146 3147 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3148 3149 /* nvme controller doesn't have memory domains */ 3150 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3151 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3152 CU_ASSERT(rc == 0); 3153 CU_ASSERT(domains[0] == NULL); 3154 CU_ASSERT(domains[1] == NULL); 3155 3156 /* nvme controller has a memory domain */ 3157 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3158 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3159 CU_ASSERT(rc == 1); 3160 CU_ASSERT(domains[0] != NULL); 3161 memset(domains, 0, sizeof(domains)); 3162 3163 /* multipath, 2 controllers report 1 memory domain each */ 3164 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3165 3166 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3167 CU_ASSERT(rc == 2); 3168 CU_ASSERT(domains[0] != NULL); 3169 CU_ASSERT(domains[1] != NULL); 3170 memset(domains, 0, sizeof(domains)); 3171 3172 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3173 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3174 CU_ASSERT(rc == 2); 3175 3176 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3177 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3178 CU_ASSERT(rc == 2); 3179 CU_ASSERT(domains[0] == NULL); 3180 CU_ASSERT(domains[1] == NULL); 3181 3182 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3183 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3184 CU_ASSERT(rc == 2); 3185 CU_ASSERT(domains[0] != NULL); 3186 CU_ASSERT(domains[1] == NULL); 3187 memset(domains, 0, sizeof(domains)); 3188 3189 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3190 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3191 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3192 CU_ASSERT(rc == 4); 3193 CU_ASSERT(domains[0] != NULL); 3194 CU_ASSERT(domains[1] != NULL); 3195 CU_ASSERT(domains[2] != NULL); 3196 CU_ASSERT(domains[3] != NULL); 3197 memset(domains, 0, sizeof(domains)); 3198 3199 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3200 * Array size is less than the number of memory domains */ 3201 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3202 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3203 CU_ASSERT(rc == 4); 3204 CU_ASSERT(domains[0] != NULL); 3205 CU_ASSERT(domains[1] != NULL); 3206 CU_ASSERT(domains[2] != NULL); 3207 CU_ASSERT(domains[3] == NULL); 3208 memset(domains, 0, sizeof(domains)); 3209 3210 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3211 } 3212 3213 static void 3214 test_reconnect_qpair(void) 3215 { 3216 struct spdk_nvme_transport_id trid = {}; 3217 struct spdk_nvme_ctrlr *ctrlr; 3218 struct nvme_ctrlr *nvme_ctrlr; 3219 const int STRING_SIZE = 32; 3220 const char *attached_names[STRING_SIZE]; 3221 struct nvme_bdev *bdev; 3222 struct spdk_io_channel *ch1, *ch2; 3223 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3224 struct nvme_io_path *io_path1, *io_path2; 3225 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3226 int rc; 3227 3228 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3229 ut_init_trid(&trid); 3230 3231 set_thread(0); 3232 3233 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3234 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3235 3236 g_ut_attach_ctrlr_status = 0; 3237 g_ut_attach_bdev_count = 1; 3238 3239 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3240 attach_ctrlr_done, NULL, NULL, NULL, false); 3241 CU_ASSERT(rc == 0); 3242 3243 spdk_delay_us(1000); 3244 poll_threads(); 3245 3246 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3247 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3248 3249 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3250 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3251 3252 ch1 = spdk_get_io_channel(bdev); 3253 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3254 3255 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3256 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3257 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3258 nvme_qpair1 = io_path1->qpair; 3259 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3260 3261 set_thread(1); 3262 3263 ch2 = spdk_get_io_channel(bdev); 3264 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3265 3266 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3267 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3268 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3269 nvme_qpair2 = io_path2->qpair; 3270 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3271 3272 /* If a qpair is disconnected, it is freed and then reconnected via 3273 * resetting the corresponding nvme_ctrlr. 3274 */ 3275 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3276 ctrlr->is_failed = true; 3277 3278 poll_thread_times(1, 3); 3279 CU_ASSERT(nvme_qpair1->qpair != NULL); 3280 CU_ASSERT(nvme_qpair2->qpair == NULL); 3281 CU_ASSERT(nvme_ctrlr->resetting == true); 3282 3283 poll_thread_times(0, 3); 3284 CU_ASSERT(nvme_qpair1->qpair == NULL); 3285 CU_ASSERT(nvme_qpair2->qpair == NULL); 3286 CU_ASSERT(ctrlr->is_failed == true); 3287 3288 poll_thread_times(1, 2); 3289 poll_thread_times(0, 1); 3290 CU_ASSERT(ctrlr->is_failed == false); 3291 CU_ASSERT(ctrlr->adminq.is_connected == false); 3292 3293 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3294 poll_thread_times(0, 2); 3295 CU_ASSERT(ctrlr->adminq.is_connected == true); 3296 3297 poll_thread_times(0, 1); 3298 poll_thread_times(1, 1); 3299 CU_ASSERT(nvme_qpair1->qpair != NULL); 3300 CU_ASSERT(nvme_qpair2->qpair != NULL); 3301 CU_ASSERT(nvme_ctrlr->resetting == true); 3302 3303 poll_thread_times(0, 2); 3304 poll_thread_times(1, 1); 3305 poll_thread_times(0, 1); 3306 CU_ASSERT(nvme_ctrlr->resetting == false); 3307 3308 poll_threads(); 3309 3310 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3311 * fails, the qpair is just freed. 3312 */ 3313 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3314 ctrlr->is_failed = true; 3315 ctrlr->fail_reset = true; 3316 3317 poll_thread_times(1, 3); 3318 CU_ASSERT(nvme_qpair1->qpair != NULL); 3319 CU_ASSERT(nvme_qpair2->qpair == NULL); 3320 CU_ASSERT(nvme_ctrlr->resetting == true); 3321 3322 poll_thread_times(0, 3); 3323 poll_thread_times(1, 1); 3324 CU_ASSERT(nvme_qpair1->qpair == NULL); 3325 CU_ASSERT(nvme_qpair2->qpair == NULL); 3326 CU_ASSERT(ctrlr->is_failed == true); 3327 3328 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3329 poll_thread_times(0, 3); 3330 poll_thread_times(1, 1); 3331 poll_thread_times(0, 1); 3332 CU_ASSERT(ctrlr->is_failed == true); 3333 CU_ASSERT(nvme_ctrlr->resetting == false); 3334 CU_ASSERT(nvme_qpair1->qpair == NULL); 3335 CU_ASSERT(nvme_qpair2->qpair == NULL); 3336 3337 poll_threads(); 3338 3339 spdk_put_io_channel(ch2); 3340 3341 set_thread(0); 3342 3343 spdk_put_io_channel(ch1); 3344 3345 poll_threads(); 3346 3347 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3348 CU_ASSERT(rc == 0); 3349 3350 poll_threads(); 3351 spdk_delay_us(1000); 3352 poll_threads(); 3353 3354 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3355 } 3356 3357 static void 3358 test_create_bdev_ctrlr(void) 3359 { 3360 struct nvme_path_id path1 = {}, path2 = {}; 3361 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3362 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3363 const int STRING_SIZE = 32; 3364 const char *attached_names[STRING_SIZE]; 3365 int rc; 3366 3367 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3368 ut_init_trid(&path1.trid); 3369 ut_init_trid2(&path2.trid); 3370 3371 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3372 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3373 3374 g_ut_attach_ctrlr_status = 0; 3375 g_ut_attach_bdev_count = 0; 3376 3377 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3378 attach_ctrlr_done, NULL, NULL, NULL, true); 3379 CU_ASSERT(rc == 0); 3380 3381 spdk_delay_us(1000); 3382 poll_threads(); 3383 3384 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3385 poll_threads(); 3386 3387 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3388 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3389 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3390 3391 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3392 g_ut_attach_ctrlr_status = -EINVAL; 3393 3394 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3395 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3396 3397 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3398 3399 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3400 attach_ctrlr_done, NULL, NULL, NULL, true); 3401 CU_ASSERT(rc == 0); 3402 3403 spdk_delay_us(1000); 3404 poll_threads(); 3405 3406 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3407 poll_threads(); 3408 3409 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3410 3411 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3412 g_ut_attach_ctrlr_status = 0; 3413 3414 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3415 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3416 3417 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3418 attach_ctrlr_done, NULL, NULL, NULL, true); 3419 CU_ASSERT(rc == 0); 3420 3421 spdk_delay_us(1000); 3422 poll_threads(); 3423 3424 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3425 poll_threads(); 3426 3427 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3428 3429 /* Delete two ctrlrs at once. */ 3430 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3431 CU_ASSERT(rc == 0); 3432 3433 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3434 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3435 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3436 3437 poll_threads(); 3438 spdk_delay_us(1000); 3439 poll_threads(); 3440 3441 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3442 3443 /* Add two ctrlrs and delete one by one. */ 3444 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3445 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3446 3447 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3448 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3449 3450 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3451 attach_ctrlr_done, NULL, NULL, NULL, true); 3452 CU_ASSERT(rc == 0); 3453 3454 spdk_delay_us(1000); 3455 poll_threads(); 3456 3457 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3458 poll_threads(); 3459 3460 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3461 attach_ctrlr_done, NULL, NULL, NULL, true); 3462 CU_ASSERT(rc == 0); 3463 3464 spdk_delay_us(1000); 3465 poll_threads(); 3466 3467 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3468 poll_threads(); 3469 3470 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3471 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3472 3473 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3474 CU_ASSERT(rc == 0); 3475 3476 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3477 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3478 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3479 3480 poll_threads(); 3481 spdk_delay_us(1000); 3482 poll_threads(); 3483 3484 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3485 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3486 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3487 3488 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3489 CU_ASSERT(rc == 0); 3490 3491 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3492 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3493 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3494 3495 poll_threads(); 3496 spdk_delay_us(1000); 3497 poll_threads(); 3498 3499 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3500 } 3501 3502 static struct nvme_ns * 3503 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3504 { 3505 struct nvme_ns *nvme_ns; 3506 3507 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3508 if (nvme_ns->ctrlr == nvme_ctrlr) { 3509 return nvme_ns; 3510 } 3511 } 3512 3513 return NULL; 3514 } 3515 3516 static void 3517 test_add_multi_ns_to_bdev(void) 3518 { 3519 struct nvme_path_id path1 = {}, path2 = {}; 3520 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3521 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3522 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3523 struct nvme_ns *nvme_ns1, *nvme_ns2; 3524 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3525 const int STRING_SIZE = 32; 3526 const char *attached_names[STRING_SIZE]; 3527 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3528 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3529 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3530 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3531 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3532 int rc; 3533 3534 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3535 ut_init_trid(&path1.trid); 3536 ut_init_trid2(&path2.trid); 3537 3538 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3539 3540 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3541 * namespaces are populated. 3542 */ 3543 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3544 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3545 3546 ctrlr1->ns[1].is_active = false; 3547 ctrlr1->ns[4].is_active = false; 3548 ctrlr1->ns[0].uuid = &uuid1; 3549 ctrlr1->ns[2].uuid = &uuid3; 3550 ctrlr1->ns[3].uuid = &uuid4; 3551 3552 g_ut_attach_ctrlr_status = 0; 3553 g_ut_attach_bdev_count = 3; 3554 3555 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3556 attach_ctrlr_done, NULL, NULL, NULL, true); 3557 CU_ASSERT(rc == 0); 3558 3559 spdk_delay_us(1000); 3560 poll_threads(); 3561 3562 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3563 poll_threads(); 3564 3565 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3566 * namespaces are populated. The uuid of 4th namespace is different, and hence 3567 * adding 4th namespace to a bdev should fail. 3568 */ 3569 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3570 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3571 3572 ctrlr2->ns[2].is_active = false; 3573 ctrlr2->ns[4].is_active = false; 3574 ctrlr2->ns[0].uuid = &uuid1; 3575 ctrlr2->ns[1].uuid = &uuid2; 3576 ctrlr2->ns[3].uuid = &uuid44; 3577 3578 g_ut_attach_ctrlr_status = 0; 3579 g_ut_attach_bdev_count = 2; 3580 3581 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3582 attach_ctrlr_done, NULL, NULL, NULL, true); 3583 CU_ASSERT(rc == 0); 3584 3585 spdk_delay_us(1000); 3586 poll_threads(); 3587 3588 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3589 poll_threads(); 3590 3591 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3592 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3593 3594 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3595 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3596 3597 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3598 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3599 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3600 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3601 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3602 3603 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3604 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3605 3606 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3607 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3608 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3609 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3610 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3611 3612 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3613 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3614 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3615 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3616 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3617 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3618 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3619 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3620 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3621 3622 CU_ASSERT(bdev1->ref == 2); 3623 CU_ASSERT(bdev2->ref == 1); 3624 CU_ASSERT(bdev3->ref == 1); 3625 CU_ASSERT(bdev4->ref == 1); 3626 3627 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3628 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3629 CU_ASSERT(rc == 0); 3630 3631 poll_threads(); 3632 spdk_delay_us(1000); 3633 poll_threads(); 3634 3635 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3636 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3637 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3638 3639 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3640 CU_ASSERT(rc == 0); 3641 3642 poll_threads(); 3643 spdk_delay_us(1000); 3644 poll_threads(); 3645 3646 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3647 3648 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3649 * can be deleted when the bdev subsystem shutdown. 3650 */ 3651 g_ut_attach_bdev_count = 1; 3652 3653 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3654 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3655 3656 ctrlr1->ns[0].uuid = &uuid1; 3657 3658 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3659 attach_ctrlr_done, NULL, NULL, NULL, true); 3660 CU_ASSERT(rc == 0); 3661 3662 spdk_delay_us(1000); 3663 poll_threads(); 3664 3665 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3666 poll_threads(); 3667 3668 ut_init_trid2(&path2.trid); 3669 3670 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3671 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3672 3673 ctrlr2->ns[0].uuid = &uuid1; 3674 3675 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3676 attach_ctrlr_done, NULL, NULL, NULL, true); 3677 CU_ASSERT(rc == 0); 3678 3679 spdk_delay_us(1000); 3680 poll_threads(); 3681 3682 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3683 poll_threads(); 3684 3685 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3686 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3687 3688 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3689 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3690 3691 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3692 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3693 3694 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3695 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3696 3697 /* Check if a nvme_bdev has two nvme_ns. */ 3698 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3699 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3700 CU_ASSERT(nvme_ns1->bdev == bdev1); 3701 3702 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3703 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3704 CU_ASSERT(nvme_ns2->bdev == bdev1); 3705 3706 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3707 bdev_nvme_destruct(&bdev1->disk); 3708 3709 poll_threads(); 3710 3711 CU_ASSERT(nvme_ns1->bdev == NULL); 3712 CU_ASSERT(nvme_ns2->bdev == NULL); 3713 3714 nvme_ctrlr1->destruct = true; 3715 _nvme_ctrlr_destruct(nvme_ctrlr1); 3716 3717 poll_threads(); 3718 spdk_delay_us(1000); 3719 poll_threads(); 3720 3721 nvme_ctrlr2->destruct = true; 3722 _nvme_ctrlr_destruct(nvme_ctrlr2); 3723 3724 poll_threads(); 3725 spdk_delay_us(1000); 3726 poll_threads(); 3727 3728 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3729 } 3730 3731 static void 3732 test_add_multi_io_paths_to_nbdev_ch(void) 3733 { 3734 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3735 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3736 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3737 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3738 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3739 const int STRING_SIZE = 32; 3740 const char *attached_names[STRING_SIZE]; 3741 struct nvme_bdev *bdev; 3742 struct spdk_io_channel *ch; 3743 struct nvme_bdev_channel *nbdev_ch; 3744 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3745 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3746 int rc; 3747 3748 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3749 ut_init_trid(&path1.trid); 3750 ut_init_trid2(&path2.trid); 3751 ut_init_trid3(&path3.trid); 3752 g_ut_attach_ctrlr_status = 0; 3753 g_ut_attach_bdev_count = 1; 3754 3755 set_thread(1); 3756 3757 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3758 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3759 3760 ctrlr1->ns[0].uuid = &uuid1; 3761 3762 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3763 attach_ctrlr_done, NULL, NULL, NULL, true); 3764 CU_ASSERT(rc == 0); 3765 3766 spdk_delay_us(1000); 3767 poll_threads(); 3768 3769 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3770 poll_threads(); 3771 3772 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3773 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3774 3775 ctrlr2->ns[0].uuid = &uuid1; 3776 3777 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3778 attach_ctrlr_done, NULL, NULL, NULL, true); 3779 CU_ASSERT(rc == 0); 3780 3781 spdk_delay_us(1000); 3782 poll_threads(); 3783 3784 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3785 poll_threads(); 3786 3787 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3788 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3789 3790 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3791 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3792 3793 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3794 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3795 3796 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3797 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3798 3799 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3800 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3801 3802 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3803 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3804 3805 set_thread(0); 3806 3807 ch = spdk_get_io_channel(bdev); 3808 SPDK_CU_ASSERT_FATAL(ch != NULL); 3809 nbdev_ch = spdk_io_channel_get_ctx(ch); 3810 3811 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3812 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3813 3814 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3815 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3816 3817 set_thread(1); 3818 3819 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3820 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3821 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3822 3823 ctrlr3->ns[0].uuid = &uuid1; 3824 3825 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3826 attach_ctrlr_done, NULL, NULL, NULL, true); 3827 CU_ASSERT(rc == 0); 3828 3829 spdk_delay_us(1000); 3830 poll_threads(); 3831 3832 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3833 poll_threads(); 3834 3835 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3836 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3837 3838 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3839 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3840 3841 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3842 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3843 3844 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3845 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3846 CU_ASSERT(rc == 0); 3847 3848 poll_threads(); 3849 spdk_delay_us(1000); 3850 poll_threads(); 3851 3852 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3853 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3854 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3855 3856 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3857 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3858 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3859 3860 set_thread(0); 3861 3862 spdk_put_io_channel(ch); 3863 3864 poll_threads(); 3865 3866 set_thread(1); 3867 3868 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3869 CU_ASSERT(rc == 0); 3870 3871 poll_threads(); 3872 spdk_delay_us(1000); 3873 poll_threads(); 3874 3875 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3876 } 3877 3878 static void 3879 test_admin_path(void) 3880 { 3881 struct nvme_path_id path1 = {}, path2 = {}; 3882 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3883 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3884 const int STRING_SIZE = 32; 3885 const char *attached_names[STRING_SIZE]; 3886 struct nvme_bdev *bdev; 3887 struct spdk_io_channel *ch; 3888 struct spdk_bdev_io *bdev_io; 3889 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3890 int rc; 3891 3892 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3893 ut_init_trid(&path1.trid); 3894 ut_init_trid2(&path2.trid); 3895 g_ut_attach_ctrlr_status = 0; 3896 g_ut_attach_bdev_count = 1; 3897 3898 set_thread(0); 3899 3900 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3901 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3902 3903 ctrlr1->ns[0].uuid = &uuid1; 3904 3905 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3906 attach_ctrlr_done, NULL, NULL, NULL, true); 3907 CU_ASSERT(rc == 0); 3908 3909 spdk_delay_us(1000); 3910 poll_threads(); 3911 3912 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3913 poll_threads(); 3914 3915 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3916 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3917 3918 ctrlr2->ns[0].uuid = &uuid1; 3919 3920 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3921 attach_ctrlr_done, NULL, NULL, NULL, true); 3922 CU_ASSERT(rc == 0); 3923 3924 spdk_delay_us(1000); 3925 poll_threads(); 3926 3927 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3928 poll_threads(); 3929 3930 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3931 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3932 3933 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3934 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3935 3936 ch = spdk_get_io_channel(bdev); 3937 SPDK_CU_ASSERT_FATAL(ch != NULL); 3938 3939 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3940 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3941 3942 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3943 * submitted to ctrlr2. 3944 */ 3945 ctrlr1->is_failed = true; 3946 bdev_io->internal.in_submit_request = true; 3947 3948 bdev_nvme_submit_request(ch, bdev_io); 3949 3950 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3951 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3952 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3953 3954 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3955 poll_threads(); 3956 3957 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3958 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3959 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3960 3961 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3962 ctrlr2->is_failed = true; 3963 bdev_io->internal.in_submit_request = true; 3964 3965 bdev_nvme_submit_request(ch, bdev_io); 3966 3967 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3968 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3969 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3970 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3971 3972 free(bdev_io); 3973 3974 spdk_put_io_channel(ch); 3975 3976 poll_threads(); 3977 3978 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3979 CU_ASSERT(rc == 0); 3980 3981 poll_threads(); 3982 spdk_delay_us(1000); 3983 poll_threads(); 3984 3985 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3986 } 3987 3988 static struct nvme_io_path * 3989 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3990 struct nvme_ctrlr *nvme_ctrlr) 3991 { 3992 struct nvme_io_path *io_path; 3993 3994 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3995 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3996 return io_path; 3997 } 3998 } 3999 4000 return NULL; 4001 } 4002 4003 static void 4004 test_reset_bdev_ctrlr(void) 4005 { 4006 struct nvme_path_id path1 = {}, path2 = {}; 4007 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4008 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4009 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4010 struct nvme_path_id *curr_path1, *curr_path2; 4011 const int STRING_SIZE = 32; 4012 const char *attached_names[STRING_SIZE]; 4013 struct nvme_bdev *bdev; 4014 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 4015 struct nvme_bdev_io *first_bio; 4016 struct spdk_io_channel *ch1, *ch2; 4017 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 4018 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 4019 int rc; 4020 4021 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4022 ut_init_trid(&path1.trid); 4023 ut_init_trid2(&path2.trid); 4024 g_ut_attach_ctrlr_status = 0; 4025 g_ut_attach_bdev_count = 1; 4026 4027 set_thread(0); 4028 4029 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4030 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4031 4032 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4033 attach_ctrlr_done, NULL, NULL, NULL, true); 4034 CU_ASSERT(rc == 0); 4035 4036 spdk_delay_us(1000); 4037 poll_threads(); 4038 4039 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4040 poll_threads(); 4041 4042 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4043 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4044 4045 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4046 attach_ctrlr_done, NULL, NULL, NULL, true); 4047 CU_ASSERT(rc == 0); 4048 4049 spdk_delay_us(1000); 4050 poll_threads(); 4051 4052 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4053 poll_threads(); 4054 4055 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4056 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4057 4058 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4059 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4060 4061 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4062 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4063 4064 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4065 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4066 4067 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4068 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4069 4070 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4071 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4072 4073 set_thread(0); 4074 4075 ch1 = spdk_get_io_channel(bdev); 4076 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4077 4078 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4079 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4080 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4081 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4082 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4083 4084 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4085 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4086 4087 set_thread(1); 4088 4089 ch2 = spdk_get_io_channel(bdev); 4090 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4091 4092 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4093 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4094 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4095 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4096 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4097 4098 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4099 4100 /* The first reset request from bdev_io is submitted on thread 0. 4101 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4102 * 4103 * A few extra polls are necessary after resetting ctrlr1 to check 4104 * pending reset requests for ctrlr1. 4105 */ 4106 ctrlr1->is_failed = true; 4107 curr_path1->last_failed_tsc = spdk_get_ticks(); 4108 ctrlr2->is_failed = true; 4109 curr_path2->last_failed_tsc = spdk_get_ticks(); 4110 4111 set_thread(0); 4112 4113 bdev_nvme_submit_request(ch1, first_bdev_io); 4114 CU_ASSERT(first_bio->io_path == io_path11); 4115 CU_ASSERT(nvme_ctrlr1->resetting == true); 4116 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4117 4118 poll_thread_times(0, 3); 4119 CU_ASSERT(io_path11->qpair->qpair == NULL); 4120 CU_ASSERT(io_path21->qpair->qpair != NULL); 4121 4122 poll_thread_times(1, 2); 4123 CU_ASSERT(io_path11->qpair->qpair == NULL); 4124 CU_ASSERT(io_path21->qpair->qpair == NULL); 4125 CU_ASSERT(ctrlr1->is_failed == true); 4126 4127 poll_thread_times(0, 1); 4128 CU_ASSERT(nvme_ctrlr1->resetting == true); 4129 CU_ASSERT(ctrlr1->is_failed == false); 4130 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4131 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4132 4133 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4134 poll_thread_times(0, 2); 4135 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4136 4137 poll_thread_times(0, 1); 4138 CU_ASSERT(io_path11->qpair->qpair != NULL); 4139 CU_ASSERT(io_path21->qpair->qpair == NULL); 4140 4141 poll_thread_times(1, 1); 4142 CU_ASSERT(io_path11->qpair->qpair != NULL); 4143 CU_ASSERT(io_path21->qpair->qpair != NULL); 4144 4145 poll_thread_times(0, 2); 4146 CU_ASSERT(nvme_ctrlr1->resetting == true); 4147 poll_thread_times(1, 1); 4148 CU_ASSERT(nvme_ctrlr1->resetting == true); 4149 poll_thread_times(0, 2); 4150 CU_ASSERT(nvme_ctrlr1->resetting == false); 4151 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4152 CU_ASSERT(first_bio->io_path == io_path12); 4153 CU_ASSERT(nvme_ctrlr2->resetting == true); 4154 4155 poll_thread_times(0, 3); 4156 CU_ASSERT(io_path12->qpair->qpair == NULL); 4157 CU_ASSERT(io_path22->qpair->qpair != NULL); 4158 4159 poll_thread_times(1, 2); 4160 CU_ASSERT(io_path12->qpair->qpair == NULL); 4161 CU_ASSERT(io_path22->qpair->qpair == NULL); 4162 CU_ASSERT(ctrlr2->is_failed == true); 4163 4164 poll_thread_times(0, 1); 4165 CU_ASSERT(nvme_ctrlr2->resetting == true); 4166 CU_ASSERT(ctrlr2->is_failed == false); 4167 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4168 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4169 4170 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4171 poll_thread_times(0, 2); 4172 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4173 4174 poll_thread_times(0, 1); 4175 CU_ASSERT(io_path12->qpair->qpair != NULL); 4176 CU_ASSERT(io_path22->qpair->qpair == NULL); 4177 4178 poll_thread_times(1, 2); 4179 CU_ASSERT(io_path12->qpair->qpair != NULL); 4180 CU_ASSERT(io_path22->qpair->qpair != NULL); 4181 4182 poll_thread_times(0, 2); 4183 CU_ASSERT(nvme_ctrlr2->resetting == true); 4184 poll_thread_times(1, 1); 4185 CU_ASSERT(nvme_ctrlr2->resetting == true); 4186 poll_thread_times(0, 2); 4187 CU_ASSERT(first_bio->io_path == NULL); 4188 CU_ASSERT(nvme_ctrlr2->resetting == false); 4189 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4190 4191 poll_threads(); 4192 4193 /* There is a race between two reset requests from bdev_io. 4194 * 4195 * The first reset request is submitted on thread 0, and the second reset 4196 * request is submitted on thread 1 while the first is resetting ctrlr1. 4197 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4198 * both reset requests go to ctrlr2. The first comes earlier than the second. 4199 * The second is pending on ctrlr2 again. After the first completes resetting 4200 * ctrl2, both complete successfully. 4201 */ 4202 ctrlr1->is_failed = true; 4203 curr_path1->last_failed_tsc = spdk_get_ticks(); 4204 ctrlr2->is_failed = true; 4205 curr_path2->last_failed_tsc = spdk_get_ticks(); 4206 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4207 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4208 4209 set_thread(0); 4210 4211 bdev_nvme_submit_request(ch1, first_bdev_io); 4212 4213 set_thread(1); 4214 4215 bdev_nvme_submit_request(ch2, second_bdev_io); 4216 4217 CU_ASSERT(nvme_ctrlr1->resetting == true); 4218 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4219 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io); 4220 4221 poll_threads(); 4222 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4223 poll_threads(); 4224 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4225 poll_threads(); 4226 4227 CU_ASSERT(ctrlr1->is_failed == false); 4228 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4229 CU_ASSERT(ctrlr2->is_failed == false); 4230 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4231 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4232 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4233 4234 set_thread(0); 4235 4236 spdk_put_io_channel(ch1); 4237 4238 set_thread(1); 4239 4240 spdk_put_io_channel(ch2); 4241 4242 poll_threads(); 4243 4244 set_thread(0); 4245 4246 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4247 CU_ASSERT(rc == 0); 4248 4249 poll_threads(); 4250 spdk_delay_us(1000); 4251 poll_threads(); 4252 4253 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4254 4255 free(first_bdev_io); 4256 free(second_bdev_io); 4257 } 4258 4259 static void 4260 test_find_io_path(void) 4261 { 4262 struct nvme_bdev_channel nbdev_ch = { 4263 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4264 }; 4265 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4266 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4267 struct spdk_nvme_ns ns1 = {}, ns2 = {}; 4268 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4269 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4270 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4271 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4272 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }; 4273 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4274 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4275 4276 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4277 4278 /* Test if io_path whose ANA state is not accessible is excluded. */ 4279 4280 nvme_qpair1.qpair = &qpair1; 4281 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4282 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4283 4284 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4285 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4286 4287 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4288 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4289 4290 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4291 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4292 4293 nbdev_ch.current_io_path = NULL; 4294 4295 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4296 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4297 4298 nbdev_ch.current_io_path = NULL; 4299 4300 /* Test if io_path whose qpair is resetting is excluded. */ 4301 4302 nvme_qpair1.qpair = NULL; 4303 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4304 4305 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4306 4307 /* Test if ANA optimized state or the first found ANA non-optimized state 4308 * is prioritized. 4309 */ 4310 4311 nvme_qpair1.qpair = &qpair1; 4312 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4313 nvme_qpair2.qpair = &qpair2; 4314 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4315 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4316 4317 nbdev_ch.current_io_path = NULL; 4318 4319 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4320 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4321 4322 nbdev_ch.current_io_path = NULL; 4323 } 4324 4325 static void 4326 test_retry_io_if_ana_state_is_updating(void) 4327 { 4328 struct nvme_path_id path = {}; 4329 struct nvme_ctrlr_opts opts = {}; 4330 struct spdk_nvme_ctrlr *ctrlr; 4331 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4332 struct nvme_ctrlr *nvme_ctrlr; 4333 const int STRING_SIZE = 32; 4334 const char *attached_names[STRING_SIZE]; 4335 struct nvme_bdev *bdev; 4336 struct nvme_ns *nvme_ns; 4337 struct spdk_bdev_io *bdev_io1; 4338 struct spdk_io_channel *ch; 4339 struct nvme_bdev_channel *nbdev_ch; 4340 struct nvme_io_path *io_path; 4341 struct nvme_qpair *nvme_qpair; 4342 int rc; 4343 4344 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4345 ut_init_trid(&path.trid); 4346 4347 set_thread(0); 4348 4349 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4350 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4351 4352 g_ut_attach_ctrlr_status = 0; 4353 g_ut_attach_bdev_count = 1; 4354 4355 opts.ctrlr_loss_timeout_sec = -1; 4356 opts.reconnect_delay_sec = 1; 4357 4358 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4359 attach_ctrlr_done, NULL, NULL, &opts, false); 4360 CU_ASSERT(rc == 0); 4361 4362 spdk_delay_us(1000); 4363 poll_threads(); 4364 4365 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4366 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4367 4368 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4369 CU_ASSERT(nvme_ctrlr != NULL); 4370 4371 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4372 CU_ASSERT(bdev != NULL); 4373 4374 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4375 CU_ASSERT(nvme_ns != NULL); 4376 4377 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4378 ut_bdev_io_set_buf(bdev_io1); 4379 4380 ch = spdk_get_io_channel(bdev); 4381 SPDK_CU_ASSERT_FATAL(ch != NULL); 4382 4383 nbdev_ch = spdk_io_channel_get_ctx(ch); 4384 4385 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4386 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4387 4388 nvme_qpair = io_path->qpair; 4389 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4390 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4391 4392 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4393 4394 /* If qpair is connected, I/O should succeed. */ 4395 bdev_io1->internal.in_submit_request = true; 4396 4397 bdev_nvme_submit_request(ch, bdev_io1); 4398 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4399 4400 poll_threads(); 4401 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4402 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4403 4404 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4405 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4406 nbdev_ch->current_io_path = NULL; 4407 4408 bdev_io1->internal.in_submit_request = true; 4409 4410 bdev_nvme_submit_request(ch, bdev_io1); 4411 4412 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4413 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4414 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4415 4416 /* ANA state became accessible while I/O was queued. */ 4417 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4418 4419 spdk_delay_us(1000000); 4420 4421 poll_thread_times(0, 1); 4422 4423 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4424 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4425 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4426 4427 poll_threads(); 4428 4429 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4430 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4431 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4432 4433 free(bdev_io1); 4434 4435 spdk_put_io_channel(ch); 4436 4437 poll_threads(); 4438 4439 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4440 CU_ASSERT(rc == 0); 4441 4442 poll_threads(); 4443 spdk_delay_us(1000); 4444 poll_threads(); 4445 4446 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4447 } 4448 4449 static void 4450 test_retry_io_for_io_path_error(void) 4451 { 4452 struct nvme_path_id path1 = {}, path2 = {}; 4453 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4454 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4455 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4456 const int STRING_SIZE = 32; 4457 const char *attached_names[STRING_SIZE]; 4458 struct nvme_bdev *bdev; 4459 struct nvme_ns *nvme_ns1, *nvme_ns2; 4460 struct spdk_bdev_io *bdev_io; 4461 struct nvme_bdev_io *bio; 4462 struct spdk_io_channel *ch; 4463 struct nvme_bdev_channel *nbdev_ch; 4464 struct nvme_io_path *io_path1, *io_path2; 4465 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4466 struct ut_nvme_req *req; 4467 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4468 int rc; 4469 4470 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4471 ut_init_trid(&path1.trid); 4472 ut_init_trid2(&path2.trid); 4473 4474 g_opts.bdev_retry_count = 1; 4475 4476 set_thread(0); 4477 4478 g_ut_attach_ctrlr_status = 0; 4479 g_ut_attach_bdev_count = 1; 4480 4481 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4482 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4483 4484 ctrlr1->ns[0].uuid = &uuid1; 4485 4486 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4487 attach_ctrlr_done, NULL, NULL, NULL, true); 4488 CU_ASSERT(rc == 0); 4489 4490 spdk_delay_us(1000); 4491 poll_threads(); 4492 4493 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4494 poll_threads(); 4495 4496 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4497 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4498 4499 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4500 CU_ASSERT(nvme_ctrlr1 != NULL); 4501 4502 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4503 CU_ASSERT(bdev != NULL); 4504 4505 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4506 CU_ASSERT(nvme_ns1 != NULL); 4507 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4508 4509 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4510 ut_bdev_io_set_buf(bdev_io); 4511 4512 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4513 4514 ch = spdk_get_io_channel(bdev); 4515 SPDK_CU_ASSERT_FATAL(ch != NULL); 4516 4517 nbdev_ch = spdk_io_channel_get_ctx(ch); 4518 4519 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4520 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4521 4522 nvme_qpair1 = io_path1->qpair; 4523 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4524 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4525 4526 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4527 4528 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4529 bdev_io->internal.in_submit_request = true; 4530 4531 bdev_nvme_submit_request(ch, bdev_io); 4532 4533 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4534 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4535 4536 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4537 SPDK_CU_ASSERT_FATAL(req != NULL); 4538 4539 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4540 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4541 req->cpl.status.dnr = 1; 4542 4543 poll_thread_times(0, 1); 4544 4545 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4546 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4547 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4548 4549 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4550 bdev_io->internal.in_submit_request = true; 4551 4552 bdev_nvme_submit_request(ch, bdev_io); 4553 4554 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4555 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4556 4557 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4558 SPDK_CU_ASSERT_FATAL(req != NULL); 4559 4560 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4561 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4562 4563 poll_thread_times(0, 1); 4564 4565 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4566 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4567 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4568 4569 poll_threads(); 4570 4571 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4572 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4573 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4574 4575 /* Add io_path2 dynamically, and create a multipath configuration. */ 4576 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4577 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4578 4579 ctrlr2->ns[0].uuid = &uuid1; 4580 4581 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4582 attach_ctrlr_done, NULL, NULL, NULL, true); 4583 CU_ASSERT(rc == 0); 4584 4585 spdk_delay_us(1000); 4586 poll_threads(); 4587 4588 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4589 poll_threads(); 4590 4591 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4592 CU_ASSERT(nvme_ctrlr2 != NULL); 4593 4594 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4595 CU_ASSERT(nvme_ns2 != NULL); 4596 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4597 4598 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4599 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4600 4601 nvme_qpair2 = io_path2->qpair; 4602 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4603 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4604 4605 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4606 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4607 * So after a retry, I/O is submitted to io_path2 and should succeed. 4608 */ 4609 bdev_io->internal.in_submit_request = true; 4610 4611 bdev_nvme_submit_request(ch, bdev_io); 4612 4613 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4614 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4615 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4616 4617 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4618 SPDK_CU_ASSERT_FATAL(req != NULL); 4619 4620 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4621 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4622 4623 poll_thread_times(0, 1); 4624 4625 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4626 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4627 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4628 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4629 4630 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4631 nvme_qpair1->qpair = NULL; 4632 4633 poll_threads(); 4634 4635 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4636 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4637 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4638 4639 free(bdev_io); 4640 4641 spdk_put_io_channel(ch); 4642 4643 poll_threads(); 4644 4645 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4646 CU_ASSERT(rc == 0); 4647 4648 poll_threads(); 4649 spdk_delay_us(1000); 4650 poll_threads(); 4651 4652 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4653 4654 g_opts.bdev_retry_count = 0; 4655 } 4656 4657 static void 4658 test_retry_io_count(void) 4659 { 4660 struct nvme_path_id path = {}; 4661 struct spdk_nvme_ctrlr *ctrlr; 4662 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4663 struct nvme_ctrlr *nvme_ctrlr; 4664 const int STRING_SIZE = 32; 4665 const char *attached_names[STRING_SIZE]; 4666 struct nvme_bdev *bdev; 4667 struct nvme_ns *nvme_ns; 4668 struct spdk_bdev_io *bdev_io; 4669 struct nvme_bdev_io *bio; 4670 struct spdk_io_channel *ch; 4671 struct nvme_bdev_channel *nbdev_ch; 4672 struct nvme_io_path *io_path; 4673 struct nvme_qpair *nvme_qpair; 4674 struct ut_nvme_req *req; 4675 int rc; 4676 4677 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4678 ut_init_trid(&path.trid); 4679 4680 set_thread(0); 4681 4682 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4683 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4684 4685 g_ut_attach_ctrlr_status = 0; 4686 g_ut_attach_bdev_count = 1; 4687 4688 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4689 attach_ctrlr_done, NULL, NULL, NULL, false); 4690 CU_ASSERT(rc == 0); 4691 4692 spdk_delay_us(1000); 4693 poll_threads(); 4694 4695 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4696 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4697 4698 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4699 CU_ASSERT(nvme_ctrlr != NULL); 4700 4701 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4702 CU_ASSERT(bdev != NULL); 4703 4704 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4705 CU_ASSERT(nvme_ns != NULL); 4706 4707 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4708 ut_bdev_io_set_buf(bdev_io); 4709 4710 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4711 4712 ch = spdk_get_io_channel(bdev); 4713 SPDK_CU_ASSERT_FATAL(ch != NULL); 4714 4715 nbdev_ch = spdk_io_channel_get_ctx(ch); 4716 4717 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4718 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4719 4720 nvme_qpair = io_path->qpair; 4721 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4722 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4723 4724 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4725 4726 /* If I/O is aborted by request, it should not be retried. */ 4727 g_opts.bdev_retry_count = 1; 4728 4729 bdev_io->internal.in_submit_request = true; 4730 4731 bdev_nvme_submit_request(ch, bdev_io); 4732 4733 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4734 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4735 4736 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4737 SPDK_CU_ASSERT_FATAL(req != NULL); 4738 4739 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4740 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4741 4742 poll_thread_times(0, 1); 4743 4744 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4745 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4746 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4747 4748 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4749 * the failed I/O should not be retried. 4750 */ 4751 g_opts.bdev_retry_count = 4; 4752 4753 bdev_io->internal.in_submit_request = true; 4754 4755 bdev_nvme_submit_request(ch, bdev_io); 4756 4757 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4758 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4759 4760 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4761 SPDK_CU_ASSERT_FATAL(req != NULL); 4762 4763 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4764 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4765 bio->retry_count = 4; 4766 4767 poll_thread_times(0, 1); 4768 4769 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4770 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4771 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4772 4773 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4774 g_opts.bdev_retry_count = -1; 4775 4776 bdev_io->internal.in_submit_request = true; 4777 4778 bdev_nvme_submit_request(ch, bdev_io); 4779 4780 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4781 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4782 4783 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4784 SPDK_CU_ASSERT_FATAL(req != NULL); 4785 4786 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4787 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4788 bio->retry_count = 4; 4789 4790 poll_thread_times(0, 1); 4791 4792 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4793 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4794 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4795 4796 poll_threads(); 4797 4798 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4799 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4800 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4801 4802 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4803 * the failed I/O should be retried. 4804 */ 4805 g_opts.bdev_retry_count = 4; 4806 4807 bdev_io->internal.in_submit_request = true; 4808 4809 bdev_nvme_submit_request(ch, bdev_io); 4810 4811 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4812 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4813 4814 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4815 SPDK_CU_ASSERT_FATAL(req != NULL); 4816 4817 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4818 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4819 bio->retry_count = 3; 4820 4821 poll_thread_times(0, 1); 4822 4823 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4824 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4825 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4826 4827 poll_threads(); 4828 4829 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4830 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4831 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4832 4833 free(bdev_io); 4834 4835 spdk_put_io_channel(ch); 4836 4837 poll_threads(); 4838 4839 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4840 CU_ASSERT(rc == 0); 4841 4842 poll_threads(); 4843 spdk_delay_us(1000); 4844 poll_threads(); 4845 4846 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4847 4848 g_opts.bdev_retry_count = 0; 4849 } 4850 4851 static void 4852 test_concurrent_read_ana_log_page(void) 4853 { 4854 struct spdk_nvme_transport_id trid = {}; 4855 struct spdk_nvme_ctrlr *ctrlr; 4856 struct nvme_ctrlr *nvme_ctrlr; 4857 const int STRING_SIZE = 32; 4858 const char *attached_names[STRING_SIZE]; 4859 int rc; 4860 4861 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4862 ut_init_trid(&trid); 4863 4864 set_thread(0); 4865 4866 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4867 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4868 4869 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4870 4871 g_ut_attach_ctrlr_status = 0; 4872 g_ut_attach_bdev_count = 1; 4873 4874 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4875 attach_ctrlr_done, NULL, NULL, NULL, false); 4876 CU_ASSERT(rc == 0); 4877 4878 spdk_delay_us(1000); 4879 poll_threads(); 4880 4881 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4882 poll_threads(); 4883 4884 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4885 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4886 4887 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4888 4889 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4890 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4891 4892 /* Following read request should be rejected. */ 4893 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4894 4895 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4896 4897 set_thread(1); 4898 4899 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4900 4901 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4902 4903 /* Reset request while reading ANA log page should not be rejected. */ 4904 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4905 CU_ASSERT(rc == 0); 4906 4907 poll_threads(); 4908 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4909 poll_threads(); 4910 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4911 poll_threads(); 4912 4913 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4914 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4915 4916 /* Read ANA log page while resetting ctrlr should be rejected. */ 4917 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4918 CU_ASSERT(rc == 0); 4919 4920 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4921 4922 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4923 4924 poll_threads(); 4925 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4926 poll_threads(); 4927 4928 set_thread(0); 4929 4930 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4931 CU_ASSERT(rc == 0); 4932 4933 poll_threads(); 4934 spdk_delay_us(1000); 4935 poll_threads(); 4936 4937 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4938 } 4939 4940 static void 4941 test_retry_io_for_ana_error(void) 4942 { 4943 struct nvme_path_id path = {}; 4944 struct spdk_nvme_ctrlr *ctrlr; 4945 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4946 struct nvme_ctrlr *nvme_ctrlr; 4947 const int STRING_SIZE = 32; 4948 const char *attached_names[STRING_SIZE]; 4949 struct nvme_bdev *bdev; 4950 struct nvme_ns *nvme_ns; 4951 struct spdk_bdev_io *bdev_io; 4952 struct nvme_bdev_io *bio; 4953 struct spdk_io_channel *ch; 4954 struct nvme_bdev_channel *nbdev_ch; 4955 struct nvme_io_path *io_path; 4956 struct nvme_qpair *nvme_qpair; 4957 struct ut_nvme_req *req; 4958 uint64_t now; 4959 int rc; 4960 4961 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4962 ut_init_trid(&path.trid); 4963 4964 g_opts.bdev_retry_count = 1; 4965 4966 set_thread(0); 4967 4968 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4969 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4970 4971 g_ut_attach_ctrlr_status = 0; 4972 g_ut_attach_bdev_count = 1; 4973 4974 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4975 attach_ctrlr_done, NULL, NULL, NULL, false); 4976 CU_ASSERT(rc == 0); 4977 4978 spdk_delay_us(1000); 4979 poll_threads(); 4980 4981 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4982 poll_threads(); 4983 4984 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4985 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4986 4987 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4988 CU_ASSERT(nvme_ctrlr != NULL); 4989 4990 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4991 CU_ASSERT(bdev != NULL); 4992 4993 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4994 CU_ASSERT(nvme_ns != NULL); 4995 4996 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4997 ut_bdev_io_set_buf(bdev_io); 4998 4999 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 5000 5001 ch = spdk_get_io_channel(bdev); 5002 SPDK_CU_ASSERT_FATAL(ch != NULL); 5003 5004 nbdev_ch = spdk_io_channel_get_ctx(ch); 5005 5006 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5007 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5008 5009 nvme_qpair = io_path->qpair; 5010 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5011 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5012 5013 now = spdk_get_ticks(); 5014 5015 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 5016 5017 /* If I/O got ANA error, it should be queued, the corresponding namespace 5018 * should be freezed and its ANA state should be updated. 5019 */ 5020 bdev_io->internal.in_submit_request = true; 5021 5022 bdev_nvme_submit_request(ch, bdev_io); 5023 5024 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5025 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5026 5027 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 5028 SPDK_CU_ASSERT_FATAL(req != NULL); 5029 5030 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5031 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 5032 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5033 5034 poll_thread_times(0, 1); 5035 5036 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5037 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5038 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5039 /* I/O should be retried immediately. */ 5040 CU_ASSERT(bio->retry_ticks == now); 5041 CU_ASSERT(nvme_ns->ana_state_updating == true); 5042 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5043 5044 poll_threads(); 5045 5046 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5047 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5048 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5049 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5050 /* I/O should be retried after a second if no I/O path was found but 5051 * any I/O path may become available. 5052 */ 5053 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5054 5055 /* Namespace should be unfreezed after completing to update its ANA state. */ 5056 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5057 poll_threads(); 5058 5059 CU_ASSERT(nvme_ns->ana_state_updating == false); 5060 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5061 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5062 5063 /* Retry the queued I/O should succeed. */ 5064 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5065 poll_threads(); 5066 5067 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5068 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5069 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5070 5071 free(bdev_io); 5072 5073 spdk_put_io_channel(ch); 5074 5075 poll_threads(); 5076 5077 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5078 CU_ASSERT(rc == 0); 5079 5080 poll_threads(); 5081 spdk_delay_us(1000); 5082 poll_threads(); 5083 5084 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5085 5086 g_opts.bdev_retry_count = 0; 5087 } 5088 5089 static void 5090 test_check_io_error_resiliency_params(void) 5091 { 5092 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5093 * 3rd parameter is fast_io_fail_timeout_sec. 5094 */ 5095 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5096 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5097 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5098 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5099 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5100 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5101 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5102 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5103 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5104 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5105 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5106 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5107 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5108 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5109 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5110 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5111 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5112 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5113 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5114 } 5115 5116 static void 5117 test_retry_io_if_ctrlr_is_resetting(void) 5118 { 5119 struct nvme_path_id path = {}; 5120 struct nvme_ctrlr_opts opts = {}; 5121 struct spdk_nvme_ctrlr *ctrlr; 5122 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5123 struct nvme_ctrlr *nvme_ctrlr; 5124 const int STRING_SIZE = 32; 5125 const char *attached_names[STRING_SIZE]; 5126 struct nvme_bdev *bdev; 5127 struct nvme_ns *nvme_ns; 5128 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5129 struct spdk_io_channel *ch; 5130 struct nvme_bdev_channel *nbdev_ch; 5131 struct nvme_io_path *io_path; 5132 struct nvme_qpair *nvme_qpair; 5133 int rc; 5134 5135 g_opts.bdev_retry_count = 1; 5136 5137 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5138 ut_init_trid(&path.trid); 5139 5140 set_thread(0); 5141 5142 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5143 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5144 5145 g_ut_attach_ctrlr_status = 0; 5146 g_ut_attach_bdev_count = 1; 5147 5148 opts.ctrlr_loss_timeout_sec = -1; 5149 opts.reconnect_delay_sec = 1; 5150 5151 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5152 attach_ctrlr_done, NULL, NULL, &opts, false); 5153 CU_ASSERT(rc == 0); 5154 5155 spdk_delay_us(1000); 5156 poll_threads(); 5157 5158 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5159 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5160 5161 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5162 CU_ASSERT(nvme_ctrlr != NULL); 5163 5164 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5165 CU_ASSERT(bdev != NULL); 5166 5167 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5168 CU_ASSERT(nvme_ns != NULL); 5169 5170 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5171 ut_bdev_io_set_buf(bdev_io1); 5172 5173 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5174 ut_bdev_io_set_buf(bdev_io2); 5175 5176 ch = spdk_get_io_channel(bdev); 5177 SPDK_CU_ASSERT_FATAL(ch != NULL); 5178 5179 nbdev_ch = spdk_io_channel_get_ctx(ch); 5180 5181 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5182 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5183 5184 nvme_qpair = io_path->qpair; 5185 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5186 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5187 5188 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5189 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5190 5191 /* If qpair is connected, I/O should succeed. */ 5192 bdev_io1->internal.in_submit_request = true; 5193 5194 bdev_nvme_submit_request(ch, bdev_io1); 5195 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5196 5197 poll_threads(); 5198 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5199 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5200 5201 /* If qpair is disconnected, it is freed and then reconnected via resetting 5202 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5203 * while resetting the nvme_ctrlr. 5204 */ 5205 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5206 ctrlr->is_failed = true; 5207 5208 poll_thread_times(0, 5); 5209 5210 CU_ASSERT(nvme_qpair->qpair == NULL); 5211 CU_ASSERT(nvme_ctrlr->resetting == true); 5212 CU_ASSERT(ctrlr->is_failed == false); 5213 5214 bdev_io1->internal.in_submit_request = true; 5215 5216 bdev_nvme_submit_request(ch, bdev_io1); 5217 5218 spdk_delay_us(1); 5219 5220 bdev_io2->internal.in_submit_request = true; 5221 5222 bdev_nvme_submit_request(ch, bdev_io2); 5223 5224 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5225 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5226 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5227 CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link)); 5228 5229 poll_threads(); 5230 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5231 poll_threads(); 5232 5233 CU_ASSERT(nvme_qpair->qpair != NULL); 5234 CU_ASSERT(nvme_ctrlr->resetting == false); 5235 5236 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5237 5238 poll_thread_times(0, 1); 5239 5240 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5241 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5242 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5243 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5244 5245 poll_threads(); 5246 5247 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5248 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5249 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5250 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5251 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5252 5253 spdk_delay_us(1); 5254 5255 poll_thread_times(0, 1); 5256 5257 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5258 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5259 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5260 5261 poll_threads(); 5262 5263 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5264 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5265 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5266 5267 free(bdev_io1); 5268 free(bdev_io2); 5269 5270 spdk_put_io_channel(ch); 5271 5272 poll_threads(); 5273 5274 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5275 CU_ASSERT(rc == 0); 5276 5277 poll_threads(); 5278 spdk_delay_us(1000); 5279 poll_threads(); 5280 5281 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5282 5283 g_opts.bdev_retry_count = 0; 5284 } 5285 5286 static void 5287 test_reconnect_ctrlr(void) 5288 { 5289 struct spdk_nvme_transport_id trid = {}; 5290 struct spdk_nvme_ctrlr ctrlr = {}; 5291 struct nvme_ctrlr *nvme_ctrlr; 5292 struct spdk_io_channel *ch1, *ch2; 5293 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5294 int rc; 5295 5296 ut_init_trid(&trid); 5297 TAILQ_INIT(&ctrlr.active_io_qpairs); 5298 5299 set_thread(0); 5300 5301 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5302 CU_ASSERT(rc == 0); 5303 5304 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5305 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5306 5307 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5308 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5309 5310 ch1 = spdk_get_io_channel(nvme_ctrlr); 5311 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5312 5313 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5314 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5315 5316 set_thread(1); 5317 5318 ch2 = spdk_get_io_channel(nvme_ctrlr); 5319 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5320 5321 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5322 5323 /* Reset starts from thread 1. */ 5324 set_thread(1); 5325 5326 /* The reset should fail and a reconnect timer should be registered. */ 5327 ctrlr.fail_reset = true; 5328 ctrlr.is_failed = true; 5329 5330 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5331 CU_ASSERT(rc == 0); 5332 CU_ASSERT(nvme_ctrlr->resetting == true); 5333 CU_ASSERT(ctrlr.is_failed == true); 5334 5335 poll_threads(); 5336 5337 CU_ASSERT(nvme_ctrlr->resetting == false); 5338 CU_ASSERT(ctrlr.is_failed == false); 5339 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5340 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5341 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5342 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5343 5344 /* A new reset starts from thread 0. */ 5345 set_thread(1); 5346 5347 /* The reset should cancel the reconnect timer and should start from reconnection. 5348 * Then, the reset should fail and a reconnect timer should be registered again. 5349 */ 5350 ctrlr.fail_reset = true; 5351 ctrlr.is_failed = true; 5352 5353 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5354 CU_ASSERT(rc == 0); 5355 CU_ASSERT(nvme_ctrlr->resetting == true); 5356 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5357 CU_ASSERT(ctrlr.is_failed == true); 5358 5359 poll_threads(); 5360 5361 CU_ASSERT(nvme_ctrlr->resetting == false); 5362 CU_ASSERT(ctrlr.is_failed == false); 5363 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5364 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5365 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5366 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5367 5368 /* Then a reconnect retry should suceeed. */ 5369 ctrlr.fail_reset = false; 5370 5371 spdk_delay_us(SPDK_SEC_TO_USEC); 5372 poll_thread_times(0, 1); 5373 5374 CU_ASSERT(nvme_ctrlr->resetting == true); 5375 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5376 5377 poll_threads(); 5378 5379 CU_ASSERT(nvme_ctrlr->resetting == false); 5380 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5381 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5382 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5383 5384 /* The reset should fail and a reconnect timer should be registered. */ 5385 ctrlr.fail_reset = true; 5386 ctrlr.is_failed = true; 5387 5388 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5389 CU_ASSERT(rc == 0); 5390 CU_ASSERT(nvme_ctrlr->resetting == true); 5391 CU_ASSERT(ctrlr.is_failed == true); 5392 5393 poll_threads(); 5394 5395 CU_ASSERT(nvme_ctrlr->resetting == false); 5396 CU_ASSERT(ctrlr.is_failed == false); 5397 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5398 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5399 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5400 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5401 5402 /* Then a reconnect retry should still fail. */ 5403 spdk_delay_us(SPDK_SEC_TO_USEC); 5404 poll_thread_times(0, 1); 5405 5406 CU_ASSERT(nvme_ctrlr->resetting == true); 5407 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5408 5409 poll_threads(); 5410 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5411 poll_threads(); 5412 5413 CU_ASSERT(nvme_ctrlr->resetting == false); 5414 CU_ASSERT(ctrlr.is_failed == false); 5415 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5416 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5417 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5418 5419 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5420 spdk_delay_us(SPDK_SEC_TO_USEC); 5421 poll_threads(); 5422 5423 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5424 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5425 CU_ASSERT(nvme_ctrlr->destruct == true); 5426 5427 spdk_put_io_channel(ch2); 5428 5429 set_thread(0); 5430 5431 spdk_put_io_channel(ch1); 5432 5433 poll_threads(); 5434 spdk_delay_us(1000); 5435 poll_threads(); 5436 5437 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5438 } 5439 5440 static struct nvme_path_id * 5441 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5442 const struct spdk_nvme_transport_id *trid) 5443 { 5444 struct nvme_path_id *p; 5445 5446 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5447 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5448 break; 5449 } 5450 } 5451 5452 return p; 5453 } 5454 5455 static void 5456 test_retry_failover_ctrlr(void) 5457 { 5458 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5459 struct spdk_nvme_ctrlr ctrlr = {}; 5460 struct nvme_ctrlr *nvme_ctrlr = NULL; 5461 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5462 struct spdk_io_channel *ch; 5463 struct nvme_ctrlr_channel *ctrlr_ch; 5464 int rc; 5465 5466 ut_init_trid(&trid1); 5467 ut_init_trid2(&trid2); 5468 ut_init_trid3(&trid3); 5469 TAILQ_INIT(&ctrlr.active_io_qpairs); 5470 5471 set_thread(0); 5472 5473 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5474 CU_ASSERT(rc == 0); 5475 5476 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5477 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5478 5479 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5480 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5481 5482 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5483 CU_ASSERT(rc == 0); 5484 5485 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5486 CU_ASSERT(rc == 0); 5487 5488 ch = spdk_get_io_channel(nvme_ctrlr); 5489 SPDK_CU_ASSERT_FATAL(ch != NULL); 5490 5491 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5492 5493 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5494 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5495 CU_ASSERT(path_id1->last_failed_tsc == 0); 5496 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5497 5498 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5499 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5500 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5501 5502 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5503 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5504 5505 /* It is expected that connecting both of trid1, trid2, and trid3 fail, 5506 * and a reconnect timer is started. */ 5507 ctrlr.fail_reset = true; 5508 ctrlr.is_failed = true; 5509 5510 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5511 CU_ASSERT(rc == 0); 5512 5513 poll_threads(); 5514 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5515 poll_threads(); 5516 5517 CU_ASSERT(nvme_ctrlr->resetting == false); 5518 CU_ASSERT(ctrlr.is_failed == false); 5519 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5520 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5521 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5522 CU_ASSERT(path_id1->last_failed_tsc != 0); 5523 5524 CU_ASSERT(path_id2->last_failed_tsc != 0); 5525 CU_ASSERT(path_id3->last_failed_tsc != 0); 5526 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5527 5528 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5529 * switched to trid2 but reset is not started. 5530 */ 5531 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5532 CU_ASSERT(rc == -EALREADY); 5533 5534 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5535 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5536 5537 CU_ASSERT(nvme_ctrlr->resetting == false); 5538 5539 /* If reconnect succeeds, trid2 should be the active path_id */ 5540 ctrlr.fail_reset = false; 5541 5542 spdk_delay_us(SPDK_SEC_TO_USEC); 5543 poll_thread_times(0, 1); 5544 5545 CU_ASSERT(nvme_ctrlr->resetting == true); 5546 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5547 5548 poll_threads(); 5549 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5550 poll_threads(); 5551 5552 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5553 CU_ASSERT(path_id2->last_failed_tsc == 0); 5554 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5555 CU_ASSERT(nvme_ctrlr->resetting == false); 5556 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5557 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5558 5559 spdk_put_io_channel(ch); 5560 5561 poll_threads(); 5562 5563 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5564 CU_ASSERT(rc == 0); 5565 5566 poll_threads(); 5567 spdk_delay_us(1000); 5568 poll_threads(); 5569 5570 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5571 } 5572 5573 static void 5574 test_fail_path(void) 5575 { 5576 struct nvme_path_id path = {}; 5577 struct nvme_ctrlr_opts opts = {}; 5578 struct spdk_nvme_ctrlr *ctrlr; 5579 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5580 struct nvme_ctrlr *nvme_ctrlr; 5581 const int STRING_SIZE = 32; 5582 const char *attached_names[STRING_SIZE]; 5583 struct nvme_bdev *bdev; 5584 struct nvme_ns *nvme_ns; 5585 struct spdk_bdev_io *bdev_io; 5586 struct spdk_io_channel *ch; 5587 struct nvme_bdev_channel *nbdev_ch; 5588 struct nvme_io_path *io_path; 5589 struct nvme_ctrlr_channel *ctrlr_ch; 5590 int rc; 5591 5592 /* The test scenario is the following. 5593 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5594 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5595 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5596 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5597 * comes first. The queued I/O is failed. 5598 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5599 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5600 */ 5601 5602 g_opts.bdev_retry_count = 1; 5603 5604 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5605 ut_init_trid(&path.trid); 5606 5607 set_thread(0); 5608 5609 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5610 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5611 5612 g_ut_attach_ctrlr_status = 0; 5613 g_ut_attach_bdev_count = 1; 5614 5615 opts.ctrlr_loss_timeout_sec = 4; 5616 opts.reconnect_delay_sec = 1; 5617 opts.fast_io_fail_timeout_sec = 2; 5618 5619 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5620 attach_ctrlr_done, NULL, NULL, &opts, false); 5621 CU_ASSERT(rc == 0); 5622 5623 spdk_delay_us(1000); 5624 poll_threads(); 5625 5626 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5627 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5628 5629 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5630 CU_ASSERT(nvme_ctrlr != NULL); 5631 5632 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5633 CU_ASSERT(bdev != NULL); 5634 5635 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5636 CU_ASSERT(nvme_ns != NULL); 5637 5638 ch = spdk_get_io_channel(bdev); 5639 SPDK_CU_ASSERT_FATAL(ch != NULL); 5640 5641 nbdev_ch = spdk_io_channel_get_ctx(ch); 5642 5643 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5644 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5645 5646 ctrlr_ch = io_path->qpair->ctrlr_ch; 5647 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5648 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5649 5650 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5651 ut_bdev_io_set_buf(bdev_io); 5652 5653 5654 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5655 ctrlr->fail_reset = true; 5656 ctrlr->is_failed = true; 5657 5658 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5659 CU_ASSERT(rc == 0); 5660 CU_ASSERT(nvme_ctrlr->resetting == true); 5661 CU_ASSERT(ctrlr->is_failed == true); 5662 5663 poll_threads(); 5664 5665 CU_ASSERT(nvme_ctrlr->resetting == false); 5666 CU_ASSERT(ctrlr->is_failed == false); 5667 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5668 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5669 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5670 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5671 5672 /* I/O should be queued. */ 5673 bdev_io->internal.in_submit_request = true; 5674 5675 bdev_nvme_submit_request(ch, bdev_io); 5676 5677 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5678 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5679 5680 /* After a second, the I/O should be still queued and the ctrlr should be 5681 * still recovering. 5682 */ 5683 spdk_delay_us(SPDK_SEC_TO_USEC); 5684 poll_threads(); 5685 5686 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5687 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5688 5689 CU_ASSERT(nvme_ctrlr->resetting == false); 5690 CU_ASSERT(ctrlr->is_failed == false); 5691 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5692 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5693 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5694 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5695 5696 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5697 5698 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5699 spdk_delay_us(SPDK_SEC_TO_USEC); 5700 poll_threads(); 5701 5702 CU_ASSERT(nvme_ctrlr->resetting == false); 5703 CU_ASSERT(ctrlr->is_failed == false); 5704 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5705 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5706 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5707 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5708 5709 /* Then within a second, pending I/O should be failed. */ 5710 spdk_delay_us(SPDK_SEC_TO_USEC); 5711 poll_threads(); 5712 5713 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5714 poll_threads(); 5715 5716 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5717 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5718 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5719 5720 /* Another I/O submission should be failed immediately. */ 5721 bdev_io->internal.in_submit_request = true; 5722 5723 bdev_nvme_submit_request(ch, bdev_io); 5724 5725 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5726 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5727 5728 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5729 * be deleted. 5730 */ 5731 spdk_delay_us(SPDK_SEC_TO_USEC); 5732 poll_threads(); 5733 5734 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5735 poll_threads(); 5736 5737 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5738 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5739 CU_ASSERT(nvme_ctrlr->destruct == true); 5740 5741 spdk_put_io_channel(ch); 5742 5743 poll_threads(); 5744 spdk_delay_us(1000); 5745 poll_threads(); 5746 5747 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5748 5749 free(bdev_io); 5750 5751 g_opts.bdev_retry_count = 0; 5752 } 5753 5754 static void 5755 test_nvme_ns_cmp(void) 5756 { 5757 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5758 5759 nvme_ns1.id = 0; 5760 nvme_ns2.id = UINT32_MAX; 5761 5762 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5763 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5764 } 5765 5766 static void 5767 test_ana_transition(void) 5768 { 5769 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5770 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5771 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5772 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5773 5774 /* case 1: ANA transition timedout is canceled. */ 5775 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5776 nvme_ns.ana_transition_timedout = true; 5777 5778 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5779 5780 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5781 5782 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5783 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5784 5785 /* case 2: ANATT timer is kept. */ 5786 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5787 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5788 &nvme_ns, 5789 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5790 5791 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5792 5793 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5794 5795 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5796 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5797 5798 /* case 3: ANATT timer is stopped. */ 5799 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5800 5801 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5802 5803 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5804 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5805 5806 /* ANATT timer is started. */ 5807 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5808 5809 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5810 5811 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5812 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5813 5814 /* ANATT timer is expired. */ 5815 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5816 5817 poll_threads(); 5818 5819 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5820 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5821 } 5822 5823 static void 5824 _set_preferred_path_cb(void *cb_arg, int rc) 5825 { 5826 bool *done = cb_arg; 5827 5828 *done = true; 5829 } 5830 5831 static void 5832 test_set_preferred_path(void) 5833 { 5834 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5835 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5836 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5837 const int STRING_SIZE = 32; 5838 const char *attached_names[STRING_SIZE]; 5839 struct nvme_bdev *bdev; 5840 struct spdk_io_channel *ch; 5841 struct nvme_bdev_channel *nbdev_ch; 5842 struct nvme_io_path *io_path; 5843 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5844 const struct spdk_nvme_ctrlr_data *cdata; 5845 bool done; 5846 int rc; 5847 5848 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5849 ut_init_trid(&path1.trid); 5850 ut_init_trid2(&path2.trid); 5851 ut_init_trid3(&path3.trid); 5852 g_ut_attach_ctrlr_status = 0; 5853 g_ut_attach_bdev_count = 1; 5854 5855 set_thread(0); 5856 5857 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5858 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5859 5860 ctrlr1->ns[0].uuid = &uuid1; 5861 5862 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5863 attach_ctrlr_done, NULL, NULL, NULL, true); 5864 CU_ASSERT(rc == 0); 5865 5866 spdk_delay_us(1000); 5867 poll_threads(); 5868 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5869 poll_threads(); 5870 5871 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5872 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5873 5874 ctrlr2->ns[0].uuid = &uuid1; 5875 5876 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5877 attach_ctrlr_done, NULL, NULL, NULL, true); 5878 CU_ASSERT(rc == 0); 5879 5880 spdk_delay_us(1000); 5881 poll_threads(); 5882 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5883 poll_threads(); 5884 5885 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5886 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5887 5888 ctrlr3->ns[0].uuid = &uuid1; 5889 5890 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5891 attach_ctrlr_done, NULL, NULL, NULL, true); 5892 CU_ASSERT(rc == 0); 5893 5894 spdk_delay_us(1000); 5895 poll_threads(); 5896 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5897 poll_threads(); 5898 5899 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5900 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5901 5902 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5903 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5904 5905 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5906 5907 ch = spdk_get_io_channel(bdev); 5908 SPDK_CU_ASSERT_FATAL(ch != NULL); 5909 nbdev_ch = spdk_io_channel_get_ctx(ch); 5910 5911 io_path = bdev_nvme_find_io_path(nbdev_ch); 5912 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5913 5914 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5915 5916 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5917 * should return io_path to ctrlr2. 5918 */ 5919 5920 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5921 done = false; 5922 5923 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5924 5925 poll_threads(); 5926 CU_ASSERT(done == true); 5927 5928 io_path = bdev_nvme_find_io_path(nbdev_ch); 5929 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5930 5931 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5932 5933 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5934 * acquired, find_io_path() should return io_path to ctrlr3. 5935 */ 5936 5937 spdk_put_io_channel(ch); 5938 5939 poll_threads(); 5940 5941 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5942 done = false; 5943 5944 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5945 5946 poll_threads(); 5947 CU_ASSERT(done == true); 5948 5949 ch = spdk_get_io_channel(bdev); 5950 SPDK_CU_ASSERT_FATAL(ch != NULL); 5951 nbdev_ch = spdk_io_channel_get_ctx(ch); 5952 5953 io_path = bdev_nvme_find_io_path(nbdev_ch); 5954 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5955 5956 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 5957 5958 spdk_put_io_channel(ch); 5959 5960 poll_threads(); 5961 5962 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5963 CU_ASSERT(rc == 0); 5964 5965 poll_threads(); 5966 spdk_delay_us(1000); 5967 poll_threads(); 5968 5969 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5970 } 5971 5972 static void 5973 test_find_next_io_path(void) 5974 { 5975 struct nvme_bdev_channel nbdev_ch = { 5976 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 5977 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 5978 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 5979 }; 5980 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 5981 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 5982 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 5983 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 5984 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 5985 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 5986 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 5987 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 5988 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 5989 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 5990 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 5991 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 5992 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 5993 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 5994 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 5995 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 5996 5997 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 5998 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 5999 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6000 6001 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6002 * is covered in test_find_io_path. 6003 */ 6004 6005 nbdev_ch.current_io_path = &io_path2; 6006 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6007 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6008 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6009 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6010 6011 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6012 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6013 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6014 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6015 6016 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6017 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6018 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6019 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6020 6021 nbdev_ch.current_io_path = &io_path3; 6022 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6023 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6024 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6025 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6026 6027 /* Test if next io_path is selected according to rr_min_io */ 6028 6029 nbdev_ch.current_io_path = NULL; 6030 nbdev_ch.rr_min_io = 2; 6031 nbdev_ch.rr_counter = 0; 6032 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6033 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6034 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6035 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6036 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6037 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6038 6039 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6040 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6041 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6042 } 6043 6044 static void 6045 test_find_io_path_min_qd(void) 6046 { 6047 struct nvme_bdev_channel nbdev_ch = { 6048 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6049 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6050 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6051 }; 6052 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6053 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6054 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6055 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6056 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6057 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6058 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6059 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6060 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6061 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6062 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6063 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6064 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6065 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6066 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6067 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6068 6069 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6070 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6071 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6072 6073 /* Test if the minumum io_outstanding or the ANA optimized state is 6074 * prioritized when using least queue depth selector 6075 */ 6076 qpair1.num_outstanding_reqs = 2; 6077 qpair2.num_outstanding_reqs = 1; 6078 qpair3.num_outstanding_reqs = 0; 6079 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6080 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6081 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6082 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6083 6084 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6085 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6086 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6087 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6088 6089 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6090 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6091 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6092 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6093 6094 qpair2.num_outstanding_reqs = 4; 6095 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6096 } 6097 6098 static void 6099 test_disable_auto_failback(void) 6100 { 6101 struct nvme_path_id path1 = {}, path2 = {}; 6102 struct nvme_ctrlr_opts opts = {}; 6103 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6104 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6105 struct nvme_ctrlr *nvme_ctrlr1; 6106 const int STRING_SIZE = 32; 6107 const char *attached_names[STRING_SIZE]; 6108 struct nvme_bdev *bdev; 6109 struct spdk_io_channel *ch; 6110 struct nvme_bdev_channel *nbdev_ch; 6111 struct nvme_io_path *io_path; 6112 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6113 const struct spdk_nvme_ctrlr_data *cdata; 6114 bool done; 6115 int rc; 6116 6117 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6118 ut_init_trid(&path1.trid); 6119 ut_init_trid2(&path2.trid); 6120 g_ut_attach_ctrlr_status = 0; 6121 g_ut_attach_bdev_count = 1; 6122 6123 g_opts.disable_auto_failback = true; 6124 6125 opts.ctrlr_loss_timeout_sec = -1; 6126 opts.reconnect_delay_sec = 1; 6127 6128 set_thread(0); 6129 6130 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6131 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6132 6133 ctrlr1->ns[0].uuid = &uuid1; 6134 6135 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6136 attach_ctrlr_done, NULL, NULL, &opts, true); 6137 CU_ASSERT(rc == 0); 6138 6139 spdk_delay_us(1000); 6140 poll_threads(); 6141 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6142 poll_threads(); 6143 6144 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6145 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6146 6147 ctrlr2->ns[0].uuid = &uuid1; 6148 6149 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6150 attach_ctrlr_done, NULL, NULL, &opts, true); 6151 CU_ASSERT(rc == 0); 6152 6153 spdk_delay_us(1000); 6154 poll_threads(); 6155 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6156 poll_threads(); 6157 6158 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6159 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6160 6161 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6162 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6163 6164 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6165 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6166 6167 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6168 6169 ch = spdk_get_io_channel(bdev); 6170 SPDK_CU_ASSERT_FATAL(ch != NULL); 6171 nbdev_ch = spdk_io_channel_get_ctx(ch); 6172 6173 io_path = bdev_nvme_find_io_path(nbdev_ch); 6174 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6175 6176 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6177 6178 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6179 ctrlr1->fail_reset = true; 6180 ctrlr1->is_failed = true; 6181 6182 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6183 6184 poll_threads(); 6185 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6186 poll_threads(); 6187 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6188 poll_threads(); 6189 6190 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6191 6192 io_path = bdev_nvme_find_io_path(nbdev_ch); 6193 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6194 6195 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6196 6197 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6198 * Hence, io_path to ctrlr2 should still be used. 6199 */ 6200 ctrlr1->fail_reset = false; 6201 6202 spdk_delay_us(SPDK_SEC_TO_USEC); 6203 poll_threads(); 6204 6205 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6206 6207 io_path = bdev_nvme_find_io_path(nbdev_ch); 6208 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6209 6210 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6211 6212 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6213 * be used again. 6214 */ 6215 6216 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6217 done = false; 6218 6219 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6220 6221 poll_threads(); 6222 CU_ASSERT(done == true); 6223 6224 io_path = bdev_nvme_find_io_path(nbdev_ch); 6225 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6226 6227 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6228 6229 spdk_put_io_channel(ch); 6230 6231 poll_threads(); 6232 6233 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6234 CU_ASSERT(rc == 0); 6235 6236 poll_threads(); 6237 spdk_delay_us(1000); 6238 poll_threads(); 6239 6240 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6241 6242 g_opts.disable_auto_failback = false; 6243 } 6244 6245 static void 6246 ut_set_multipath_policy_done(void *cb_arg, int rc) 6247 { 6248 int *done = cb_arg; 6249 6250 SPDK_CU_ASSERT_FATAL(done != NULL); 6251 *done = rc; 6252 } 6253 6254 static void 6255 test_set_multipath_policy(void) 6256 { 6257 struct nvme_path_id path1 = {}, path2 = {}; 6258 struct nvme_ctrlr_opts opts = {}; 6259 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6260 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6261 const int STRING_SIZE = 32; 6262 const char *attached_names[STRING_SIZE]; 6263 struct nvme_bdev *bdev; 6264 struct spdk_io_channel *ch; 6265 struct nvme_bdev_channel *nbdev_ch; 6266 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6267 int done; 6268 int rc; 6269 6270 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6271 ut_init_trid(&path1.trid); 6272 ut_init_trid2(&path2.trid); 6273 g_ut_attach_ctrlr_status = 0; 6274 g_ut_attach_bdev_count = 1; 6275 6276 g_opts.disable_auto_failback = true; 6277 6278 opts.ctrlr_loss_timeout_sec = -1; 6279 opts.reconnect_delay_sec = 1; 6280 6281 set_thread(0); 6282 6283 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6284 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6285 6286 ctrlr1->ns[0].uuid = &uuid1; 6287 6288 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6289 attach_ctrlr_done, NULL, NULL, &opts, true); 6290 CU_ASSERT(rc == 0); 6291 6292 spdk_delay_us(1000); 6293 poll_threads(); 6294 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6295 poll_threads(); 6296 6297 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6298 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6299 6300 ctrlr2->ns[0].uuid = &uuid1; 6301 6302 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6303 attach_ctrlr_done, NULL, NULL, &opts, true); 6304 CU_ASSERT(rc == 0); 6305 6306 spdk_delay_us(1000); 6307 poll_threads(); 6308 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6309 poll_threads(); 6310 6311 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6312 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6313 6314 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6315 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6316 6317 /* If multipath policy is updated before getting any I/O channel, 6318 * an new I/O channel should have the update. 6319 */ 6320 done = -1; 6321 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6322 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6323 ut_set_multipath_policy_done, &done); 6324 poll_threads(); 6325 CU_ASSERT(done == 0); 6326 6327 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6328 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6329 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6330 6331 ch = spdk_get_io_channel(bdev); 6332 SPDK_CU_ASSERT_FATAL(ch != NULL); 6333 nbdev_ch = spdk_io_channel_get_ctx(ch); 6334 6335 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6336 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6337 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6338 6339 /* If multipath policy is updated while a I/O channel is active, 6340 * the update should be applied to the I/O channel immediately. 6341 */ 6342 done = -1; 6343 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6344 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6345 ut_set_multipath_policy_done, &done); 6346 poll_threads(); 6347 CU_ASSERT(done == 0); 6348 6349 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6350 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6351 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6352 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6353 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6354 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6355 6356 spdk_put_io_channel(ch); 6357 6358 poll_threads(); 6359 6360 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6361 CU_ASSERT(rc == 0); 6362 6363 poll_threads(); 6364 spdk_delay_us(1000); 6365 poll_threads(); 6366 6367 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6368 } 6369 6370 static void 6371 test_uuid_generation(void) 6372 { 6373 uint32_t nsid1 = 1, nsid2 = 2; 6374 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6375 char sn3[21] = " "; 6376 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6377 struct spdk_uuid uuid1, uuid2; 6378 int rc; 6379 6380 /* Test case 1: 6381 * Serial numbers are the same, nsids are different. 6382 * Compare two generated UUID - they should be different. */ 6383 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6384 CU_ASSERT(rc == 0); 6385 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6386 CU_ASSERT(rc == 0); 6387 6388 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6389 6390 /* Test case 2: 6391 * Serial numbers differ only by one character, nsids are the same. 6392 * Compare two generated UUID - they should be different. */ 6393 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6394 CU_ASSERT(rc == 0); 6395 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6396 CU_ASSERT(rc == 0); 6397 6398 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6399 6400 /* Test case 3: 6401 * Serial number comprises only of space characters. 6402 * Validate the generated UUID. */ 6403 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6404 CU_ASSERT(rc == 0); 6405 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6406 6407 } 6408 6409 static void 6410 test_retry_io_to_same_path(void) 6411 { 6412 struct nvme_path_id path1 = {}, path2 = {}; 6413 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6414 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6415 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6416 const int STRING_SIZE = 32; 6417 const char *attached_names[STRING_SIZE]; 6418 struct nvme_bdev *bdev; 6419 struct spdk_bdev_io *bdev_io; 6420 struct nvme_bdev_io *bio; 6421 struct spdk_io_channel *ch; 6422 struct nvme_bdev_channel *nbdev_ch; 6423 struct nvme_io_path *io_path1, *io_path2; 6424 struct ut_nvme_req *req; 6425 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6426 int done; 6427 int rc; 6428 6429 g_opts.nvme_ioq_poll_period_us = 1; 6430 6431 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6432 ut_init_trid(&path1.trid); 6433 ut_init_trid2(&path2.trid); 6434 g_ut_attach_ctrlr_status = 0; 6435 g_ut_attach_bdev_count = 1; 6436 6437 set_thread(0); 6438 6439 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6440 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6441 6442 ctrlr1->ns[0].uuid = &uuid1; 6443 6444 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6445 attach_ctrlr_done, NULL, NULL, NULL, true); 6446 CU_ASSERT(rc == 0); 6447 6448 spdk_delay_us(1000); 6449 poll_threads(); 6450 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6451 poll_threads(); 6452 6453 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6454 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6455 6456 ctrlr2->ns[0].uuid = &uuid1; 6457 6458 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6459 attach_ctrlr_done, NULL, NULL, NULL, true); 6460 CU_ASSERT(rc == 0); 6461 6462 spdk_delay_us(1000); 6463 poll_threads(); 6464 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6465 poll_threads(); 6466 6467 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6468 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6469 6470 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6471 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6472 6473 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 6474 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6475 6476 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6477 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6478 6479 done = -1; 6480 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6481 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6482 poll_threads(); 6483 CU_ASSERT(done == 0); 6484 6485 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6486 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6487 CU_ASSERT(bdev->rr_min_io == 1); 6488 6489 ch = spdk_get_io_channel(bdev); 6490 SPDK_CU_ASSERT_FATAL(ch != NULL); 6491 nbdev_ch = spdk_io_channel_get_ctx(ch); 6492 6493 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6494 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6495 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6496 6497 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6498 ut_bdev_io_set_buf(bdev_io); 6499 6500 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6501 6502 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6503 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6504 6505 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6506 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6507 6508 /* The 1st I/O should be submitted to io_path1. */ 6509 bdev_io->internal.in_submit_request = true; 6510 6511 bdev_nvme_submit_request(ch, bdev_io); 6512 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6513 CU_ASSERT(bio->io_path == io_path1); 6514 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6515 6516 spdk_delay_us(1); 6517 6518 poll_threads(); 6519 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6520 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6521 6522 /* The 2nd I/O should be submitted to io_path2 because the path selection 6523 * policy is round-robin. 6524 */ 6525 bdev_io->internal.in_submit_request = true; 6526 6527 bdev_nvme_submit_request(ch, bdev_io); 6528 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6529 CU_ASSERT(bio->io_path == io_path2); 6530 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6531 6532 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6533 SPDK_CU_ASSERT_FATAL(req != NULL); 6534 6535 /* Set retry count to non-zero. */ 6536 g_opts.bdev_retry_count = 2; 6537 6538 /* Inject an I/O error. */ 6539 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6540 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6541 6542 /* The 2nd I/O should be queued to nbdev_ch. */ 6543 spdk_delay_us(1); 6544 poll_thread_times(0, 1); 6545 6546 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6547 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6548 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6549 6550 /* The 2nd I/O should keep caching io_path2. */ 6551 CU_ASSERT(bio->io_path == io_path2); 6552 6553 /* The 2nd I/O should be submitted to io_path2 again. */ 6554 poll_thread_times(0, 1); 6555 6556 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6557 CU_ASSERT(bio->io_path == io_path2); 6558 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6559 6560 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6561 SPDK_CU_ASSERT_FATAL(req != NULL); 6562 6563 /* Inject an I/O error again. */ 6564 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6565 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6566 req->cpl.status.crd = 1; 6567 6568 ctrlr2->cdata.crdt[1] = 1; 6569 6570 /* The 2nd I/O should be queued to nbdev_ch. */ 6571 spdk_delay_us(1); 6572 poll_thread_times(0, 1); 6573 6574 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6575 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6576 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6577 6578 /* The 2nd I/O should keep caching io_path2. */ 6579 CU_ASSERT(bio->io_path == io_path2); 6580 6581 /* Detach ctrlr2 dynamically. */ 6582 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6583 CU_ASSERT(rc == 0); 6584 6585 spdk_delay_us(1000); 6586 poll_threads(); 6587 spdk_delay_us(1000); 6588 poll_threads(); 6589 spdk_delay_us(1000); 6590 poll_threads(); 6591 spdk_delay_us(1000); 6592 poll_threads(); 6593 6594 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 6595 6596 poll_threads(); 6597 spdk_delay_us(100000); 6598 poll_threads(); 6599 spdk_delay_us(1); 6600 poll_threads(); 6601 6602 /* The 2nd I/O should succeed by io_path1. */ 6603 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6604 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6605 CU_ASSERT(bio->io_path == io_path1); 6606 6607 free(bdev_io); 6608 6609 spdk_put_io_channel(ch); 6610 6611 poll_threads(); 6612 spdk_delay_us(1); 6613 poll_threads(); 6614 6615 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6616 CU_ASSERT(rc == 0); 6617 6618 poll_threads(); 6619 spdk_delay_us(1000); 6620 poll_threads(); 6621 6622 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6623 6624 g_opts.nvme_ioq_poll_period_us = 0; 6625 g_opts.bdev_retry_count = 0; 6626 } 6627 6628 /* This case is to verify a fix for a complex race condition that 6629 * failover is lost if fabric connect command gets timeout while 6630 * controller is being reset. 6631 */ 6632 static void 6633 test_race_between_reset_and_disconnected(void) 6634 { 6635 struct spdk_nvme_transport_id trid = {}; 6636 struct spdk_nvme_ctrlr ctrlr = {}; 6637 struct nvme_ctrlr *nvme_ctrlr = NULL; 6638 struct nvme_path_id *curr_trid; 6639 struct spdk_io_channel *ch1, *ch2; 6640 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6641 int rc; 6642 6643 ut_init_trid(&trid); 6644 TAILQ_INIT(&ctrlr.active_io_qpairs); 6645 6646 set_thread(0); 6647 6648 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6649 CU_ASSERT(rc == 0); 6650 6651 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6652 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6653 6654 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6655 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6656 6657 ch1 = spdk_get_io_channel(nvme_ctrlr); 6658 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6659 6660 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6661 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6662 6663 set_thread(1); 6664 6665 ch2 = spdk_get_io_channel(nvme_ctrlr); 6666 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6667 6668 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6669 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6670 6671 /* Reset starts from thread 1. */ 6672 set_thread(1); 6673 6674 nvme_ctrlr->resetting = false; 6675 curr_trid->last_failed_tsc = spdk_get_ticks(); 6676 ctrlr.is_failed = true; 6677 6678 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6679 CU_ASSERT(rc == 0); 6680 CU_ASSERT(nvme_ctrlr->resetting == true); 6681 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6682 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6683 6684 poll_thread_times(0, 3); 6685 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6686 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6687 6688 poll_thread_times(0, 1); 6689 poll_thread_times(1, 1); 6690 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6691 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6692 CU_ASSERT(ctrlr.is_failed == true); 6693 6694 poll_thread_times(1, 1); 6695 poll_thread_times(0, 1); 6696 CU_ASSERT(ctrlr.is_failed == false); 6697 CU_ASSERT(ctrlr.adminq.is_connected == false); 6698 6699 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6700 poll_thread_times(0, 2); 6701 CU_ASSERT(ctrlr.adminq.is_connected == true); 6702 6703 poll_thread_times(0, 1); 6704 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6705 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6706 6707 poll_thread_times(1, 1); 6708 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6709 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6710 CU_ASSERT(nvme_ctrlr->resetting == true); 6711 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6712 6713 poll_thread_times(0, 2); 6714 CU_ASSERT(nvme_ctrlr->resetting == true); 6715 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6716 poll_thread_times(1, 1); 6717 CU_ASSERT(nvme_ctrlr->resetting == true); 6718 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6719 6720 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6721 * 6722 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric 6723 * connect command is executed. If fabric connect command gets timeout, 6724 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6725 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6726 * 6727 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 6728 */ 6729 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6730 CU_ASSERT(rc == -EINPROGRESS); 6731 CU_ASSERT(nvme_ctrlr->resetting == true); 6732 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6733 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6734 6735 poll_thread_times(0, 1); 6736 6737 CU_ASSERT(nvme_ctrlr->resetting == true); 6738 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6739 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6740 6741 poll_threads(); 6742 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6743 poll_threads(); 6744 6745 CU_ASSERT(nvme_ctrlr->resetting == false); 6746 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6747 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6748 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6749 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6750 6751 spdk_put_io_channel(ch2); 6752 6753 set_thread(0); 6754 6755 spdk_put_io_channel(ch1); 6756 6757 poll_threads(); 6758 6759 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6760 CU_ASSERT(rc == 0); 6761 6762 poll_threads(); 6763 spdk_delay_us(1000); 6764 poll_threads(); 6765 6766 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6767 } 6768 static void 6769 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6770 { 6771 int *_rc = (int *)cb_arg; 6772 6773 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6774 *_rc = rc; 6775 } 6776 6777 static void 6778 test_ctrlr_op_rpc(void) 6779 { 6780 struct spdk_nvme_transport_id trid = {}; 6781 struct spdk_nvme_ctrlr ctrlr = {}; 6782 struct nvme_ctrlr *nvme_ctrlr = NULL; 6783 struct nvme_path_id *curr_trid; 6784 struct spdk_io_channel *ch1, *ch2; 6785 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6786 int ctrlr_op_rc; 6787 int rc; 6788 6789 ut_init_trid(&trid); 6790 TAILQ_INIT(&ctrlr.active_io_qpairs); 6791 6792 set_thread(0); 6793 6794 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6795 CU_ASSERT(rc == 0); 6796 6797 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6798 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6799 6800 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6801 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6802 6803 ch1 = spdk_get_io_channel(nvme_ctrlr); 6804 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6805 6806 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6807 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6808 6809 set_thread(1); 6810 6811 ch2 = spdk_get_io_channel(nvme_ctrlr); 6812 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6813 6814 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6815 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6816 6817 /* Reset starts from thread 1. */ 6818 set_thread(1); 6819 6820 /* Case 1: ctrlr is already being destructed. */ 6821 nvme_ctrlr->destruct = true; 6822 ctrlr_op_rc = 0; 6823 6824 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6825 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6826 6827 poll_threads(); 6828 6829 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6830 6831 /* Case 2: reset is in progress. */ 6832 nvme_ctrlr->destruct = false; 6833 nvme_ctrlr->resetting = true; 6834 ctrlr_op_rc = 0; 6835 6836 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6837 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6838 6839 poll_threads(); 6840 6841 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6842 6843 /* Case 3: reset completes successfully. */ 6844 nvme_ctrlr->resetting = false; 6845 curr_trid->last_failed_tsc = spdk_get_ticks(); 6846 ctrlr.is_failed = true; 6847 ctrlr_op_rc = -1; 6848 6849 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6850 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6851 6852 CU_ASSERT(nvme_ctrlr->resetting == true); 6853 CU_ASSERT(ctrlr_op_rc == -1); 6854 6855 poll_threads(); 6856 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6857 poll_threads(); 6858 6859 CU_ASSERT(nvme_ctrlr->resetting == false); 6860 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6861 CU_ASSERT(ctrlr.is_failed == false); 6862 CU_ASSERT(ctrlr_op_rc == 0); 6863 6864 /* Case 4: invalid operation. */ 6865 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6866 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6867 6868 poll_threads(); 6869 6870 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6871 6872 spdk_put_io_channel(ch2); 6873 6874 set_thread(0); 6875 6876 spdk_put_io_channel(ch1); 6877 6878 poll_threads(); 6879 6880 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6881 CU_ASSERT(rc == 0); 6882 6883 poll_threads(); 6884 spdk_delay_us(1000); 6885 poll_threads(); 6886 6887 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6888 } 6889 6890 static void 6891 test_bdev_ctrlr_op_rpc(void) 6892 { 6893 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6894 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6895 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6896 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6897 struct nvme_path_id *curr_trid1, *curr_trid2; 6898 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6899 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6900 int ctrlr_op_rc; 6901 int rc; 6902 6903 ut_init_trid(&trid1); 6904 ut_init_trid2(&trid2); 6905 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6906 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6907 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6908 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6909 ctrlr1.cdata.cntlid = 1; 6910 ctrlr2.cdata.cntlid = 2; 6911 ctrlr1.adminq.is_connected = true; 6912 ctrlr2.adminq.is_connected = true; 6913 6914 set_thread(0); 6915 6916 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6917 CU_ASSERT(rc == 0); 6918 6919 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6920 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6921 6922 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1); 6923 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6924 6925 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6926 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6927 6928 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6929 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6930 6931 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6932 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6933 6934 set_thread(1); 6935 6936 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6937 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6938 6939 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6940 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6941 6942 set_thread(0); 6943 6944 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6945 CU_ASSERT(rc == 0); 6946 6947 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2); 6948 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6949 6950 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 6951 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 6952 6953 ch21 = spdk_get_io_channel(nvme_ctrlr2); 6954 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 6955 6956 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 6957 CU_ASSERT(ctrlr_ch21->qpair != NULL); 6958 6959 set_thread(1); 6960 6961 ch22 = spdk_get_io_channel(nvme_ctrlr2); 6962 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 6963 6964 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 6965 CU_ASSERT(ctrlr_ch22->qpair != NULL); 6966 6967 /* Reset starts from thread 1. */ 6968 set_thread(1); 6969 6970 nvme_ctrlr1->resetting = false; 6971 nvme_ctrlr2->resetting = false; 6972 curr_trid1->last_failed_tsc = spdk_get_ticks(); 6973 curr_trid2->last_failed_tsc = spdk_get_ticks(); 6974 ctrlr_op_rc = -1; 6975 6976 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 6977 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6978 6979 CU_ASSERT(nvme_ctrlr1->resetting == true); 6980 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6981 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6982 CU_ASSERT(nvme_ctrlr2->resetting == false); 6983 6984 poll_thread_times(0, 3); 6985 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6986 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 6987 6988 poll_thread_times(0, 1); 6989 poll_thread_times(1, 1); 6990 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6991 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 6992 6993 poll_thread_times(1, 1); 6994 poll_thread_times(0, 1); 6995 CU_ASSERT(ctrlr1.adminq.is_connected == false); 6996 6997 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6998 poll_thread_times(0, 2); 6999 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7000 7001 poll_thread_times(0, 1); 7002 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7003 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7004 7005 poll_thread_times(1, 1); 7006 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7007 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7008 CU_ASSERT(nvme_ctrlr1->resetting == true); 7009 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7010 7011 poll_thread_times(0, 2); 7012 poll_thread_times(1, 1); 7013 poll_thread_times(0, 1); 7014 poll_thread_times(1, 1); 7015 poll_thread_times(0, 1); 7016 poll_thread_times(1, 1); 7017 poll_thread_times(0, 1); 7018 7019 CU_ASSERT(nvme_ctrlr1->resetting == false); 7020 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7021 CU_ASSERT(nvme_ctrlr2->resetting == true); 7022 7023 poll_threads(); 7024 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7025 poll_threads(); 7026 7027 CU_ASSERT(nvme_ctrlr2->resetting == false); 7028 CU_ASSERT(ctrlr_op_rc == 0); 7029 7030 set_thread(1); 7031 7032 spdk_put_io_channel(ch12); 7033 spdk_put_io_channel(ch22); 7034 7035 set_thread(0); 7036 7037 spdk_put_io_channel(ch11); 7038 spdk_put_io_channel(ch21); 7039 7040 poll_threads(); 7041 7042 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7043 CU_ASSERT(rc == 0); 7044 7045 poll_threads(); 7046 spdk_delay_us(1000); 7047 poll_threads(); 7048 7049 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7050 } 7051 7052 static void 7053 test_disable_enable_ctrlr(void) 7054 { 7055 struct spdk_nvme_transport_id trid = {}; 7056 struct spdk_nvme_ctrlr ctrlr = {}; 7057 struct nvme_ctrlr *nvme_ctrlr = NULL; 7058 struct nvme_path_id *curr_trid; 7059 struct spdk_io_channel *ch1, *ch2; 7060 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7061 int rc; 7062 7063 ut_init_trid(&trid); 7064 TAILQ_INIT(&ctrlr.active_io_qpairs); 7065 ctrlr.adminq.is_connected = true; 7066 7067 set_thread(0); 7068 7069 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7070 CU_ASSERT(rc == 0); 7071 7072 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7073 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7074 7075 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7076 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7077 7078 ch1 = spdk_get_io_channel(nvme_ctrlr); 7079 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7080 7081 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7082 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7083 7084 set_thread(1); 7085 7086 ch2 = spdk_get_io_channel(nvme_ctrlr); 7087 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7088 7089 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7090 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7091 7092 /* Disable starts from thread 1. */ 7093 set_thread(1); 7094 7095 /* Case 1: ctrlr is already disabled. */ 7096 nvme_ctrlr->disabled = true; 7097 7098 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7099 CU_ASSERT(rc == -EALREADY); 7100 7101 /* Case 2: ctrlr is already being destructed. */ 7102 nvme_ctrlr->disabled = false; 7103 nvme_ctrlr->destruct = true; 7104 7105 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7106 CU_ASSERT(rc == -ENXIO); 7107 7108 /* Case 3: reset is in progress. */ 7109 nvme_ctrlr->destruct = false; 7110 nvme_ctrlr->resetting = true; 7111 7112 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7113 CU_ASSERT(rc == -EBUSY); 7114 7115 /* Case 4: disable completes successfully. */ 7116 nvme_ctrlr->resetting = false; 7117 7118 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7119 CU_ASSERT(rc == 0); 7120 CU_ASSERT(nvme_ctrlr->resetting == true); 7121 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7122 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7123 7124 poll_thread_times(0, 3); 7125 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7126 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7127 7128 poll_thread_times(0, 1); 7129 poll_thread_times(1, 1); 7130 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7131 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7132 7133 poll_thread_times(1, 1); 7134 poll_thread_times(0, 1); 7135 CU_ASSERT(ctrlr.adminq.is_connected == false); 7136 poll_thread_times(1, 1); 7137 poll_thread_times(0, 1); 7138 poll_thread_times(1, 1); 7139 poll_thread_times(0, 1); 7140 CU_ASSERT(nvme_ctrlr->resetting == false); 7141 CU_ASSERT(nvme_ctrlr->disabled == true); 7142 7143 /* Case 5: enable completes successfully. */ 7144 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7145 CU_ASSERT(rc == 0); 7146 7147 CU_ASSERT(nvme_ctrlr->resetting == true); 7148 CU_ASSERT(nvme_ctrlr->disabled == false); 7149 7150 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7151 poll_thread_times(0, 2); 7152 CU_ASSERT(ctrlr.adminq.is_connected == true); 7153 7154 poll_thread_times(0, 1); 7155 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7156 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7157 7158 poll_thread_times(1, 1); 7159 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7160 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7161 CU_ASSERT(nvme_ctrlr->resetting == true); 7162 7163 poll_thread_times(0, 2); 7164 CU_ASSERT(nvme_ctrlr->resetting == true); 7165 poll_thread_times(1, 1); 7166 CU_ASSERT(nvme_ctrlr->resetting == true); 7167 poll_thread_times(0, 1); 7168 CU_ASSERT(nvme_ctrlr->resetting == false); 7169 7170 /* Case 6: ctrlr is already enabled. */ 7171 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7172 CU_ASSERT(rc == -EALREADY); 7173 7174 set_thread(0); 7175 7176 /* Case 7: disable cancels delayed reconnect. */ 7177 nvme_ctrlr->opts.reconnect_delay_sec = 10; 7178 ctrlr.fail_reset = true; 7179 7180 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7181 CU_ASSERT(rc == 0); 7182 7183 poll_threads(); 7184 7185 CU_ASSERT(nvme_ctrlr->resetting == false); 7186 CU_ASSERT(ctrlr.is_failed == false); 7187 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7188 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7189 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 7190 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 7191 7192 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7193 CU_ASSERT(rc == 0); 7194 7195 CU_ASSERT(nvme_ctrlr->resetting == true); 7196 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 7197 7198 poll_threads(); 7199 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7200 poll_threads(); 7201 7202 CU_ASSERT(nvme_ctrlr->resetting == false); 7203 CU_ASSERT(nvme_ctrlr->disabled == true); 7204 7205 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7206 CU_ASSERT(rc == 0); 7207 7208 CU_ASSERT(nvme_ctrlr->resetting == true); 7209 CU_ASSERT(nvme_ctrlr->disabled == false); 7210 7211 poll_threads(); 7212 7213 CU_ASSERT(nvme_ctrlr->resetting == false); 7214 7215 set_thread(1); 7216 7217 spdk_put_io_channel(ch2); 7218 7219 set_thread(0); 7220 7221 spdk_put_io_channel(ch1); 7222 7223 poll_threads(); 7224 7225 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7226 CU_ASSERT(rc == 0); 7227 7228 poll_threads(); 7229 spdk_delay_us(1000); 7230 poll_threads(); 7231 7232 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7233 } 7234 7235 static void 7236 ut_delete_done(void *ctx, int rc) 7237 { 7238 int *delete_done_rc = ctx; 7239 *delete_done_rc = rc; 7240 } 7241 7242 static void 7243 test_delete_ctrlr_done(void) 7244 { 7245 struct spdk_nvme_transport_id trid = {}; 7246 struct spdk_nvme_ctrlr ctrlr = {}; 7247 int delete_done_rc = 0xDEADBEEF; 7248 int rc; 7249 7250 ut_init_trid(&trid); 7251 7252 nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7253 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 7254 7255 rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc); 7256 CU_ASSERT(rc == 0); 7257 7258 for (int i = 0; i < 20; i++) { 7259 poll_threads(); 7260 if (delete_done_rc == 0) { 7261 break; 7262 } 7263 spdk_delay_us(1000); 7264 } 7265 7266 CU_ASSERT(delete_done_rc == 0); 7267 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7268 } 7269 7270 static void 7271 test_ns_remove_during_reset(void) 7272 { 7273 struct nvme_path_id path = {}; 7274 struct nvme_ctrlr_opts opts = {}; 7275 struct spdk_nvme_ctrlr *ctrlr; 7276 struct nvme_bdev_ctrlr *nbdev_ctrlr; 7277 struct nvme_ctrlr *nvme_ctrlr; 7278 const int STRING_SIZE = 32; 7279 const char *attached_names[STRING_SIZE]; 7280 struct nvme_bdev *bdev; 7281 struct nvme_ns *nvme_ns; 7282 union spdk_nvme_async_event_completion event = {}; 7283 struct spdk_nvme_cpl cpl = {}; 7284 int rc; 7285 7286 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 7287 ut_init_trid(&path.trid); 7288 7289 set_thread(0); 7290 7291 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 7292 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 7293 7294 g_ut_attach_ctrlr_status = 0; 7295 g_ut_attach_bdev_count = 1; 7296 7297 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 7298 attach_ctrlr_done, NULL, NULL, &opts, false); 7299 CU_ASSERT(rc == 0); 7300 7301 spdk_delay_us(1000); 7302 poll_threads(); 7303 7304 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7305 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7306 7307 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 7308 CU_ASSERT(nvme_ctrlr != NULL); 7309 7310 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 7311 CU_ASSERT(bdev != NULL); 7312 7313 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 7314 CU_ASSERT(nvme_ns != NULL); 7315 7316 /* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist, 7317 * but nvme_ns->ns should be NULL. 7318 */ 7319 7320 CU_ASSERT(ctrlr->ns[0].is_active == true); 7321 ctrlr->ns[0].is_active = false; 7322 7323 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7324 CU_ASSERT(rc == 0); 7325 7326 poll_threads(); 7327 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7328 poll_threads(); 7329 7330 CU_ASSERT(nvme_ctrlr->resetting == false); 7331 CU_ASSERT(ctrlr->adminq.is_connected == true); 7332 7333 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7334 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7335 CU_ASSERT(nvme_ns->bdev == bdev); 7336 CU_ASSERT(nvme_ns->ns == NULL); 7337 7338 /* Then, async event should fill nvme_ns->ns again. */ 7339 7340 ctrlr->ns[0].is_active = true; 7341 7342 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 7343 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 7344 cpl.cdw0 = event.raw; 7345 7346 aer_cb(nvme_ctrlr, &cpl); 7347 7348 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7349 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7350 CU_ASSERT(nvme_ns->bdev == bdev); 7351 CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]); 7352 7353 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7354 CU_ASSERT(rc == 0); 7355 7356 poll_threads(); 7357 spdk_delay_us(1000); 7358 poll_threads(); 7359 7360 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7361 } 7362 7363 int 7364 main(int argc, char **argv) 7365 { 7366 CU_pSuite suite = NULL; 7367 unsigned int num_failures; 7368 7369 CU_initialize_registry(); 7370 7371 suite = CU_add_suite("nvme", NULL, NULL); 7372 7373 CU_ADD_TEST(suite, test_create_ctrlr); 7374 CU_ADD_TEST(suite, test_reset_ctrlr); 7375 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 7376 CU_ADD_TEST(suite, test_failover_ctrlr); 7377 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 7378 CU_ADD_TEST(suite, test_pending_reset); 7379 CU_ADD_TEST(suite, test_attach_ctrlr); 7380 CU_ADD_TEST(suite, test_aer_cb); 7381 CU_ADD_TEST(suite, test_submit_nvme_cmd); 7382 CU_ADD_TEST(suite, test_add_remove_trid); 7383 CU_ADD_TEST(suite, test_abort); 7384 CU_ADD_TEST(suite, test_get_io_qpair); 7385 CU_ADD_TEST(suite, test_bdev_unregister); 7386 CU_ADD_TEST(suite, test_compare_ns); 7387 CU_ADD_TEST(suite, test_init_ana_log_page); 7388 CU_ADD_TEST(suite, test_get_memory_domains); 7389 CU_ADD_TEST(suite, test_reconnect_qpair); 7390 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 7391 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 7392 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 7393 CU_ADD_TEST(suite, test_admin_path); 7394 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 7395 CU_ADD_TEST(suite, test_find_io_path); 7396 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 7397 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 7398 CU_ADD_TEST(suite, test_retry_io_count); 7399 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 7400 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 7401 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 7402 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 7403 CU_ADD_TEST(suite, test_reconnect_ctrlr); 7404 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 7405 CU_ADD_TEST(suite, test_fail_path); 7406 CU_ADD_TEST(suite, test_nvme_ns_cmp); 7407 CU_ADD_TEST(suite, test_ana_transition); 7408 CU_ADD_TEST(suite, test_set_preferred_path); 7409 CU_ADD_TEST(suite, test_find_next_io_path); 7410 CU_ADD_TEST(suite, test_find_io_path_min_qd); 7411 CU_ADD_TEST(suite, test_disable_auto_failback); 7412 CU_ADD_TEST(suite, test_set_multipath_policy); 7413 CU_ADD_TEST(suite, test_uuid_generation); 7414 CU_ADD_TEST(suite, test_retry_io_to_same_path); 7415 CU_ADD_TEST(suite, test_race_between_reset_and_disconnected); 7416 CU_ADD_TEST(suite, test_ctrlr_op_rpc); 7417 CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc); 7418 CU_ADD_TEST(suite, test_disable_enable_ctrlr); 7419 CU_ADD_TEST(suite, test_delete_ctrlr_done); 7420 CU_ADD_TEST(suite, test_ns_remove_during_reset); 7421 7422 allocate_threads(3); 7423 set_thread(0); 7424 bdev_nvme_library_init(); 7425 init_accel(); 7426 7427 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7428 7429 set_thread(0); 7430 bdev_nvme_library_fini(); 7431 fini_accel(); 7432 free_threads(); 7433 7434 CU_cleanup_registry(); 7435 7436 return num_failures; 7437 } 7438