1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 12 #include "common/lib/ut_multithread.c" 13 14 #include "bdev/nvme/bdev_nvme.c" 15 16 #include "unit/lib/json_mock.c" 17 18 #include "bdev/nvme/bdev_mdns_client.c" 19 20 static void *g_accel_p = (void *)0xdeadbeaf; 21 22 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 23 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 24 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 25 spdk_nvme_remove_cb remove_cb), NULL); 26 27 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 28 enum spdk_nvme_transport_type trtype)); 29 30 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 31 NULL); 32 33 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 34 35 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 36 struct spdk_nvme_transport_id *trid), 0); 37 38 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 39 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 40 41 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 42 DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0); 43 44 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 45 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 51 52 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 53 int error_code, const char *msg)); 54 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 55 (struct spdk_jsonrpc_request *request), NULL); 56 DEFINE_STUB_V(spdk_jsonrpc_end_result, 57 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 58 59 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 60 size_t opts_size)); 61 62 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 63 size_t opts_size), 0); 64 DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL); 65 DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL); 66 67 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 68 69 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 70 enum spdk_bdev_reset_stat_mode mode)); 71 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 72 struct spdk_bdev_io_stat *add)); 73 74 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr)); 75 DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL); 76 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k)); 77 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL); 78 79 int 80 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 81 struct spdk_memory_domain **domains, int array_size) 82 { 83 int i, min_array_size; 84 85 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 86 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 87 for (i = 0; i < min_array_size; i++) { 88 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 89 } 90 } 91 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 92 93 return 0; 94 } 95 96 struct spdk_io_channel * 97 spdk_accel_get_io_channel(void) 98 { 99 return spdk_get_io_channel(g_accel_p); 100 } 101 102 void 103 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 104 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 105 { 106 /* Avoid warning that opts is used uninitialised */ 107 memset(opts, 0, opts_size); 108 } 109 110 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 111 (struct spdk_nvme_ctrlr *ctrlr), NULL); 112 113 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 114 (const struct spdk_nvme_ctrlr *ctrlr), 0); 115 116 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 117 (struct spdk_nvme_ctrlr *ctrlr), NULL); 118 119 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 120 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 121 122 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 123 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 124 125 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 126 127 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 128 129 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 130 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 131 132 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 133 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 134 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 135 136 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 137 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 138 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 139 140 DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, ( 141 struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 142 struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf, 143 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 144 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 145 spdk_nvme_req_next_sge_cb next_sge_fn), 0); 146 147 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 148 size_t *size), 0); 149 150 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 151 152 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 153 154 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 155 156 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 157 158 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 159 160 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 161 162 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 163 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 164 165 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 166 167 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 168 char *name, size_t *size), 0); 169 170 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 171 (struct spdk_nvme_ns *ns), 0); 172 173 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 174 (const struct spdk_nvme_ctrlr *ctrlr), 0); 175 176 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 177 (struct spdk_nvme_ns *ns), 0); 178 179 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 180 (struct spdk_nvme_ns *ns), 0); 181 182 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 183 (struct spdk_nvme_ns *ns), 0); 184 185 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 186 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 187 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 188 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 189 190 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 191 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 192 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 193 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 194 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 195 196 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 197 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 198 void *payload, uint32_t payload_size, uint64_t slba, 199 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 200 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 201 202 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 203 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 204 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 205 206 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 207 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 208 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 209 210 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 211 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 212 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 213 214 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 215 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 216 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 217 218 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 219 220 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 221 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 222 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 223 224 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 225 (const struct spdk_nvme_status *status), NULL); 226 227 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 228 (const struct spdk_nvme_status *status), NULL); 229 230 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 231 232 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 233 234 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 235 236 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 237 238 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 239 240 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 241 struct iovec *iov, 242 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 243 DEFINE_STUB(spdk_accel_append_crc32c, int, 244 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst, 245 struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx, 246 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 247 DEFINE_STUB_V(spdk_accel_sequence_finish, 248 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 249 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 250 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 251 252 struct ut_nvme_req { 253 uint16_t opc; 254 spdk_nvme_cmd_cb cb_fn; 255 void *cb_arg; 256 struct spdk_nvme_cpl cpl; 257 TAILQ_ENTRY(ut_nvme_req) tailq; 258 }; 259 260 struct spdk_nvme_ns { 261 struct spdk_nvme_ctrlr *ctrlr; 262 uint32_t id; 263 bool is_active; 264 struct spdk_uuid *uuid; 265 enum spdk_nvme_ana_state ana_state; 266 enum spdk_nvme_csi csi; 267 }; 268 269 struct spdk_nvme_qpair { 270 struct spdk_nvme_ctrlr *ctrlr; 271 uint8_t failure_reason; 272 bool is_connected; 273 bool in_completion_context; 274 bool delete_after_completion_context; 275 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 276 uint32_t num_outstanding_reqs; 277 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 278 struct spdk_nvme_poll_group *poll_group; 279 void *poll_group_tailq_head; 280 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 281 }; 282 283 struct spdk_nvme_ctrlr { 284 uint32_t num_ns; 285 struct spdk_nvme_ns *ns; 286 struct spdk_nvme_ns_data *nsdata; 287 struct spdk_nvme_qpair adminq; 288 struct spdk_nvme_ctrlr_data cdata; 289 bool attached; 290 bool is_failed; 291 bool fail_reset; 292 bool is_removed; 293 struct spdk_nvme_transport_id trid; 294 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 295 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 296 struct spdk_nvme_ctrlr_opts opts; 297 }; 298 299 struct spdk_nvme_poll_group { 300 void *ctx; 301 struct spdk_nvme_accel_fn_table accel_fn_table; 302 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 303 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 304 }; 305 306 struct spdk_nvme_probe_ctx { 307 struct spdk_nvme_transport_id trid; 308 void *cb_ctx; 309 spdk_nvme_attach_cb attach_cb; 310 struct spdk_nvme_ctrlr *init_ctrlr; 311 }; 312 313 uint32_t 314 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 315 { 316 uint32_t nsid; 317 318 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 319 if (ctrlr->ns[nsid - 1].is_active) { 320 return nsid; 321 } 322 } 323 324 return 0; 325 } 326 327 uint32_t 328 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 329 { 330 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 331 if (ctrlr->ns[nsid - 1].is_active) { 332 return nsid; 333 } 334 } 335 336 return 0; 337 } 338 339 uint32_t 340 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 341 { 342 return qpair->num_outstanding_reqs; 343 } 344 345 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 346 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 347 g_ut_attached_ctrlrs); 348 static int g_ut_attach_ctrlr_status; 349 static size_t g_ut_attach_bdev_count; 350 static int g_ut_register_bdev_status; 351 static struct spdk_bdev *g_ut_registered_bdev; 352 static uint16_t g_ut_cntlid; 353 static struct nvme_path_id g_any_path = {}; 354 355 static void 356 ut_init_trid(struct spdk_nvme_transport_id *trid) 357 { 358 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 359 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 360 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 361 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 362 } 363 364 static void 365 ut_init_trid2(struct spdk_nvme_transport_id *trid) 366 { 367 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 368 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 369 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 370 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 371 } 372 373 static void 374 ut_init_trid3(struct spdk_nvme_transport_id *trid) 375 { 376 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 377 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 378 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 379 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 380 } 381 382 static int 383 cmp_int(int a, int b) 384 { 385 return a - b; 386 } 387 388 int 389 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 390 const struct spdk_nvme_transport_id *trid2) 391 { 392 int cmp; 393 394 /* We assume trtype is TCP for now. */ 395 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 396 397 cmp = cmp_int(trid1->trtype, trid2->trtype); 398 if (cmp) { 399 return cmp; 400 } 401 402 cmp = strcasecmp(trid1->traddr, trid2->traddr); 403 if (cmp) { 404 return cmp; 405 } 406 407 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 408 if (cmp) { 409 return cmp; 410 } 411 412 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 413 if (cmp) { 414 return cmp; 415 } 416 417 cmp = strcmp(trid1->subnqn, trid2->subnqn); 418 if (cmp) { 419 return cmp; 420 } 421 422 return 0; 423 } 424 425 static struct spdk_nvme_ctrlr * 426 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 427 bool ana_reporting, bool multipath) 428 { 429 struct spdk_nvme_ctrlr *ctrlr; 430 uint32_t i; 431 432 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 433 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 434 /* There is a ctrlr whose trid matches. */ 435 return NULL; 436 } 437 } 438 439 ctrlr = calloc(1, sizeof(*ctrlr)); 440 if (ctrlr == NULL) { 441 return NULL; 442 } 443 444 ctrlr->attached = true; 445 ctrlr->adminq.ctrlr = ctrlr; 446 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 447 ctrlr->adminq.is_connected = true; 448 449 if (num_ns != 0) { 450 ctrlr->num_ns = num_ns; 451 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 452 if (ctrlr->ns == NULL) { 453 free(ctrlr); 454 return NULL; 455 } 456 457 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 458 if (ctrlr->nsdata == NULL) { 459 free(ctrlr->ns); 460 free(ctrlr); 461 return NULL; 462 } 463 464 for (i = 0; i < num_ns; i++) { 465 ctrlr->ns[i].id = i + 1; 466 ctrlr->ns[i].ctrlr = ctrlr; 467 ctrlr->ns[i].is_active = true; 468 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 469 ctrlr->nsdata[i].nsze = 1024; 470 ctrlr->nsdata[i].nmic.can_share = multipath; 471 } 472 473 ctrlr->cdata.nn = num_ns; 474 ctrlr->cdata.mnan = num_ns; 475 ctrlr->cdata.nanagrpid = num_ns; 476 } 477 478 ctrlr->cdata.cntlid = ++g_ut_cntlid; 479 ctrlr->cdata.cmic.multi_ctrlr = multipath; 480 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 481 ctrlr->trid = *trid; 482 TAILQ_INIT(&ctrlr->active_io_qpairs); 483 484 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 485 486 return ctrlr; 487 } 488 489 static void 490 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 491 { 492 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 493 494 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 495 free(ctrlr->nsdata); 496 free(ctrlr->ns); 497 free(ctrlr); 498 } 499 500 static int 501 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 502 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 503 { 504 struct ut_nvme_req *req; 505 506 req = calloc(1, sizeof(*req)); 507 if (req == NULL) { 508 return -ENOMEM; 509 } 510 511 req->opc = opc; 512 req->cb_fn = cb_fn; 513 req->cb_arg = cb_arg; 514 515 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 516 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 517 518 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 519 qpair->num_outstanding_reqs++; 520 521 return 0; 522 } 523 524 static struct ut_nvme_req * 525 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 526 { 527 struct ut_nvme_req *req; 528 529 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 530 if (req->cb_arg == cb_arg) { 531 break; 532 } 533 } 534 535 return req; 536 } 537 538 static struct spdk_bdev_io * 539 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 540 struct spdk_io_channel *ch) 541 { 542 struct spdk_bdev_io *bdev_io; 543 544 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 545 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 546 bdev_io->type = type; 547 bdev_io->bdev = &nbdev->disk; 548 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 549 550 return bdev_io; 551 } 552 553 static void 554 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 555 { 556 bdev_io->u.bdev.iovs = &bdev_io->iov; 557 bdev_io->u.bdev.iovcnt = 1; 558 559 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 560 bdev_io->iov.iov_len = 4096; 561 } 562 563 static void 564 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 565 { 566 if (ctrlr->is_failed) { 567 free(ctrlr); 568 return; 569 } 570 571 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 572 if (probe_ctx->cb_ctx) { 573 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 574 } 575 576 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 577 578 if (probe_ctx->attach_cb) { 579 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 580 } 581 } 582 583 int 584 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 585 { 586 struct spdk_nvme_ctrlr *ctrlr, *tmp; 587 588 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 589 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 590 continue; 591 } 592 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 593 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 594 } 595 596 free(probe_ctx); 597 598 return 0; 599 } 600 601 struct spdk_nvme_probe_ctx * 602 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 603 const struct spdk_nvme_ctrlr_opts *opts, 604 spdk_nvme_attach_cb attach_cb) 605 { 606 struct spdk_nvme_probe_ctx *probe_ctx; 607 608 if (trid == NULL) { 609 return NULL; 610 } 611 612 probe_ctx = calloc(1, sizeof(*probe_ctx)); 613 if (probe_ctx == NULL) { 614 return NULL; 615 } 616 617 probe_ctx->trid = *trid; 618 probe_ctx->cb_ctx = (void *)opts; 619 probe_ctx->attach_cb = attach_cb; 620 621 return probe_ctx; 622 } 623 624 int 625 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 626 { 627 if (ctrlr->attached) { 628 ut_detach_ctrlr(ctrlr); 629 } 630 631 return 0; 632 } 633 634 int 635 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 636 { 637 SPDK_CU_ASSERT_FATAL(ctx != NULL); 638 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 639 640 return 0; 641 } 642 643 int 644 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 645 { 646 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 647 } 648 649 void 650 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 651 { 652 memset(opts, 0, opts_size); 653 654 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 655 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 656 } 657 658 const struct spdk_nvme_ctrlr_data * 659 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 660 { 661 return &ctrlr->cdata; 662 } 663 664 uint32_t 665 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 666 { 667 return ctrlr->num_ns; 668 } 669 670 struct spdk_nvme_ns * 671 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 672 { 673 if (nsid < 1 || nsid > ctrlr->num_ns) { 674 return NULL; 675 } 676 677 return &ctrlr->ns[nsid - 1]; 678 } 679 680 bool 681 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 682 { 683 if (nsid < 1 || nsid > ctrlr->num_ns) { 684 return false; 685 } 686 687 return ctrlr->ns[nsid - 1].is_active; 688 } 689 690 union spdk_nvme_csts_register 691 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 692 { 693 union spdk_nvme_csts_register csts; 694 695 csts.raw = 0; 696 697 return csts; 698 } 699 700 union spdk_nvme_vs_register 701 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 702 { 703 union spdk_nvme_vs_register vs; 704 705 vs.raw = 0; 706 707 return vs; 708 } 709 710 struct spdk_nvme_qpair * 711 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 712 const struct spdk_nvme_io_qpair_opts *user_opts, 713 size_t opts_size) 714 { 715 struct spdk_nvme_qpair *qpair; 716 717 qpair = calloc(1, sizeof(*qpair)); 718 if (qpair == NULL) { 719 return NULL; 720 } 721 722 qpair->ctrlr = ctrlr; 723 TAILQ_INIT(&qpair->outstanding_reqs); 724 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 725 726 return qpair; 727 } 728 729 static void 730 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 731 { 732 struct spdk_nvme_poll_group *group = qpair->poll_group; 733 734 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 735 736 qpair->poll_group_tailq_head = &group->connected_qpairs; 737 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 738 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 739 } 740 741 static void 742 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 743 { 744 struct spdk_nvme_poll_group *group = qpair->poll_group; 745 746 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 747 748 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 749 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 750 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 751 } 752 753 int 754 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 755 struct spdk_nvme_qpair *qpair) 756 { 757 if (qpair->is_connected) { 758 return -EISCONN; 759 } 760 761 qpair->is_connected = true; 762 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 763 764 if (qpair->poll_group) { 765 nvme_poll_group_connect_qpair(qpair); 766 } 767 768 return 0; 769 } 770 771 void 772 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 773 { 774 if (!qpair->is_connected) { 775 return; 776 } 777 778 qpair->is_connected = false; 779 780 if (qpair->poll_group != NULL) { 781 nvme_poll_group_disconnect_qpair(qpair); 782 } 783 } 784 785 int 786 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 787 { 788 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 789 790 if (qpair->in_completion_context) { 791 qpair->delete_after_completion_context = true; 792 return 0; 793 } 794 795 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 796 797 if (qpair->poll_group != NULL) { 798 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 799 } 800 801 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 802 803 CU_ASSERT(qpair->num_outstanding_reqs == 0); 804 805 free(qpair); 806 807 return 0; 808 } 809 810 int 811 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 812 { 813 if (ctrlr->fail_reset) { 814 ctrlr->is_failed = true; 815 return -EIO; 816 } 817 818 ctrlr->adminq.is_connected = true; 819 return 0; 820 } 821 822 void 823 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 824 { 825 } 826 827 int 828 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 829 { 830 if (ctrlr->is_removed) { 831 return -ENXIO; 832 } 833 834 ctrlr->adminq.is_connected = false; 835 ctrlr->is_failed = false; 836 837 return 0; 838 } 839 840 void 841 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 842 { 843 ctrlr->is_failed = true; 844 } 845 846 bool 847 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 848 { 849 return ctrlr->is_failed; 850 } 851 852 spdk_nvme_qp_failure_reason 853 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 854 { 855 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 856 } 857 858 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 859 sizeof(uint32_t)) 860 static void 861 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 862 { 863 struct spdk_nvme_ana_page ana_hdr; 864 char _ana_desc[UT_ANA_DESC_SIZE]; 865 struct spdk_nvme_ana_group_descriptor *ana_desc; 866 struct spdk_nvme_ns *ns; 867 uint32_t i; 868 869 memset(&ana_hdr, 0, sizeof(ana_hdr)); 870 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 871 872 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 873 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 874 875 buf += sizeof(ana_hdr); 876 length -= sizeof(ana_hdr); 877 878 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 879 880 for (i = 0; i < ctrlr->num_ns; i++) { 881 ns = &ctrlr->ns[i]; 882 883 if (!ns->is_active) { 884 continue; 885 } 886 887 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 888 889 ana_desc->ana_group_id = ns->id; 890 ana_desc->num_of_nsid = 1; 891 ana_desc->ana_state = ns->ana_state; 892 ana_desc->nsid[0] = ns->id; 893 894 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 895 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 896 897 buf += UT_ANA_DESC_SIZE; 898 length -= UT_ANA_DESC_SIZE; 899 } 900 } 901 902 int 903 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 904 uint8_t log_page, uint32_t nsid, 905 void *payload, uint32_t payload_size, 906 uint64_t offset, 907 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 908 { 909 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 910 SPDK_CU_ASSERT_FATAL(offset == 0); 911 ut_create_ana_log_page(ctrlr, payload, payload_size); 912 } 913 914 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 915 cb_fn, cb_arg); 916 } 917 918 int 919 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 920 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 921 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 922 { 923 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 924 } 925 926 int 927 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 928 void *cmd_cb_arg, 929 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 930 { 931 struct ut_nvme_req *req = NULL, *abort_req; 932 933 if (qpair == NULL) { 934 qpair = &ctrlr->adminq; 935 } 936 937 abort_req = calloc(1, sizeof(*abort_req)); 938 if (abort_req == NULL) { 939 return -ENOMEM; 940 } 941 942 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 943 if (req->cb_arg == cmd_cb_arg) { 944 break; 945 } 946 } 947 948 if (req == NULL) { 949 free(abort_req); 950 return -ENOENT; 951 } 952 953 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 954 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 955 956 abort_req->opc = SPDK_NVME_OPC_ABORT; 957 abort_req->cb_fn = cb_fn; 958 abort_req->cb_arg = cb_arg; 959 960 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 961 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 962 abort_req->cpl.cdw0 = 0; 963 964 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 965 ctrlr->adminq.num_outstanding_reqs++; 966 967 return 0; 968 } 969 970 int32_t 971 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 972 { 973 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 974 } 975 976 uint32_t 977 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 978 { 979 return ns->id; 980 } 981 982 struct spdk_nvme_ctrlr * 983 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 984 { 985 return ns->ctrlr; 986 } 987 988 static inline struct spdk_nvme_ns_data * 989 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 990 { 991 return &ns->ctrlr->nsdata[ns->id - 1]; 992 } 993 994 const struct spdk_nvme_ns_data * 995 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 996 { 997 return _nvme_ns_get_data(ns); 998 } 999 1000 uint64_t 1001 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 1002 { 1003 return _nvme_ns_get_data(ns)->nsze; 1004 } 1005 1006 const struct spdk_uuid * 1007 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 1008 { 1009 return ns->uuid; 1010 } 1011 1012 enum spdk_nvme_csi 1013 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 1014 return ns->csi; 1015 } 1016 1017 int 1018 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1019 void *metadata, uint64_t lba, uint32_t lba_count, 1020 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1021 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1022 { 1023 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1024 } 1025 1026 int 1027 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1028 void *buffer, void *metadata, uint64_t lba, 1029 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1030 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1031 { 1032 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1033 } 1034 1035 int 1036 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1037 uint64_t lba, uint32_t lba_count, 1038 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1039 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1040 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1041 uint16_t apptag_mask, uint16_t apptag) 1042 { 1043 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1044 } 1045 1046 int 1047 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1048 uint64_t lba, uint32_t lba_count, 1049 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1050 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1051 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1052 uint16_t apptag_mask, uint16_t apptag) 1053 { 1054 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1055 } 1056 1057 static bool g_ut_readv_ext_called; 1058 int 1059 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1060 uint64_t lba, uint32_t lba_count, 1061 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1062 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1063 spdk_nvme_req_next_sge_cb next_sge_fn, 1064 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1065 { 1066 g_ut_readv_ext_called = true; 1067 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1068 } 1069 1070 static bool g_ut_read_ext_called; 1071 int 1072 spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1073 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1074 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1075 { 1076 g_ut_read_ext_called = true; 1077 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1078 } 1079 1080 static bool g_ut_writev_ext_called; 1081 int 1082 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1083 uint64_t lba, uint32_t lba_count, 1084 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1085 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1086 spdk_nvme_req_next_sge_cb next_sge_fn, 1087 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1088 { 1089 g_ut_writev_ext_called = true; 1090 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1091 } 1092 1093 static bool g_ut_write_ext_called; 1094 int 1095 spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1096 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1097 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1098 { 1099 g_ut_write_ext_called = true; 1100 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1101 } 1102 1103 int 1104 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1105 uint64_t lba, uint32_t lba_count, 1106 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1107 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1108 spdk_nvme_req_next_sge_cb next_sge_fn, 1109 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1110 { 1111 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1112 } 1113 1114 int 1115 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1116 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1117 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1118 { 1119 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1120 } 1121 1122 int 1123 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1124 uint64_t lba, uint32_t lba_count, 1125 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1126 uint32_t io_flags) 1127 { 1128 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1129 } 1130 1131 int 1132 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1133 const struct spdk_nvme_scc_source_range *ranges, 1134 uint16_t num_ranges, uint64_t dest_lba, 1135 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1136 { 1137 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1138 } 1139 1140 struct spdk_nvme_poll_group * 1141 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1142 { 1143 struct spdk_nvme_poll_group *group; 1144 1145 group = calloc(1, sizeof(*group)); 1146 if (group == NULL) { 1147 return NULL; 1148 } 1149 1150 group->ctx = ctx; 1151 if (table != NULL) { 1152 group->accel_fn_table = *table; 1153 } 1154 TAILQ_INIT(&group->connected_qpairs); 1155 TAILQ_INIT(&group->disconnected_qpairs); 1156 1157 return group; 1158 } 1159 1160 int 1161 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1162 { 1163 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1164 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1165 return -EBUSY; 1166 } 1167 1168 free(group); 1169 1170 return 0; 1171 } 1172 1173 spdk_nvme_qp_failure_reason 1174 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1175 { 1176 return qpair->failure_reason; 1177 } 1178 1179 bool 1180 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair) 1181 { 1182 return qpair->is_connected; 1183 } 1184 1185 int32_t 1186 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1187 uint32_t max_completions) 1188 { 1189 struct ut_nvme_req *req, *tmp; 1190 uint32_t num_completions = 0; 1191 1192 if (!qpair->is_connected) { 1193 return -ENXIO; 1194 } 1195 1196 qpair->in_completion_context = true; 1197 1198 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1199 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1200 qpair->num_outstanding_reqs--; 1201 1202 req->cb_fn(req->cb_arg, &req->cpl); 1203 1204 free(req); 1205 num_completions++; 1206 } 1207 1208 qpair->in_completion_context = false; 1209 if (qpair->delete_after_completion_context) { 1210 spdk_nvme_ctrlr_free_io_qpair(qpair); 1211 } 1212 1213 return num_completions; 1214 } 1215 1216 int64_t 1217 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1218 uint32_t completions_per_qpair, 1219 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1220 { 1221 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1222 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1223 1224 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1225 1226 if (disconnected_qpair_cb == NULL) { 1227 return -EINVAL; 1228 } 1229 1230 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1231 disconnected_qpair_cb(qpair, group->ctx); 1232 } 1233 1234 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1235 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1236 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1237 /* Bump the number of completions so this counts as "busy" */ 1238 num_completions++; 1239 continue; 1240 } 1241 1242 local_completions = spdk_nvme_qpair_process_completions(qpair, 1243 completions_per_qpair); 1244 if (local_completions < 0 && error_reason == 0) { 1245 error_reason = local_completions; 1246 } else { 1247 num_completions += local_completions; 1248 assert(num_completions >= 0); 1249 } 1250 } 1251 1252 return error_reason ? error_reason : num_completions; 1253 } 1254 1255 int 1256 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1257 struct spdk_nvme_qpair *qpair) 1258 { 1259 CU_ASSERT(!qpair->is_connected); 1260 1261 qpair->poll_group = group; 1262 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1263 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1264 1265 return 0; 1266 } 1267 1268 int 1269 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1270 struct spdk_nvme_qpair *qpair) 1271 { 1272 CU_ASSERT(!qpair->is_connected); 1273 1274 if (qpair->poll_group == NULL) { 1275 return -ENOENT; 1276 } 1277 1278 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1279 1280 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1281 1282 qpair->poll_group = NULL; 1283 qpair->poll_group_tailq_head = NULL; 1284 1285 return 0; 1286 } 1287 1288 int 1289 spdk_bdev_register(struct spdk_bdev *bdev) 1290 { 1291 g_ut_registered_bdev = bdev; 1292 1293 return g_ut_register_bdev_status; 1294 } 1295 1296 void 1297 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1298 { 1299 int rc; 1300 1301 rc = bdev->fn_table->destruct(bdev->ctxt); 1302 1303 if (bdev == g_ut_registered_bdev) { 1304 g_ut_registered_bdev = NULL; 1305 } 1306 1307 if (rc <= 0 && cb_fn != NULL) { 1308 cb_fn(cb_arg, rc); 1309 } 1310 } 1311 1312 int 1313 spdk_bdev_open_ext(const char *bdev_name, bool write, 1314 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1315 struct spdk_bdev_desc **desc) 1316 { 1317 if (g_ut_registered_bdev == NULL || 1318 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1319 return -ENODEV; 1320 } 1321 1322 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1323 1324 return 0; 1325 } 1326 1327 struct spdk_bdev * 1328 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1329 { 1330 return (struct spdk_bdev *)desc; 1331 } 1332 1333 int 1334 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1335 { 1336 bdev->blockcnt = size; 1337 1338 return 0; 1339 } 1340 1341 struct spdk_io_channel * 1342 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1343 { 1344 return (struct spdk_io_channel *)bdev_io->internal.ch; 1345 } 1346 1347 struct spdk_thread * 1348 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io) 1349 { 1350 return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io)); 1351 } 1352 1353 void 1354 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1355 { 1356 bdev_io->internal.status = status; 1357 bdev_io->internal.in_submit_request = false; 1358 } 1359 1360 void 1361 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1362 { 1363 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1364 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1365 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1366 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1367 } else { 1368 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1369 } 1370 1371 bdev_io->internal.error.nvme.cdw0 = cdw0; 1372 bdev_io->internal.error.nvme.sct = sct; 1373 bdev_io->internal.error.nvme.sc = sc; 1374 1375 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1376 } 1377 1378 void 1379 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1380 { 1381 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1382 1383 ut_bdev_io_set_buf(bdev_io); 1384 1385 cb(ch, bdev_io, true); 1386 } 1387 1388 static void 1389 test_create_ctrlr(void) 1390 { 1391 struct spdk_nvme_transport_id trid = {}; 1392 struct spdk_nvme_ctrlr ctrlr = {}; 1393 int rc; 1394 1395 ut_init_trid(&trid); 1396 1397 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1398 CU_ASSERT(rc == 0); 1399 1400 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1401 1402 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1403 CU_ASSERT(rc == 0); 1404 1405 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1406 1407 poll_threads(); 1408 spdk_delay_us(1000); 1409 poll_threads(); 1410 1411 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1412 } 1413 1414 static void 1415 ut_check_hotplug_on_reset(void *cb_arg, int rc) 1416 { 1417 bool *detect_remove = cb_arg; 1418 1419 CU_ASSERT(rc != 0); 1420 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1421 1422 *detect_remove = true; 1423 } 1424 1425 static void 1426 test_reset_ctrlr(void) 1427 { 1428 struct spdk_nvme_transport_id trid = {}; 1429 struct spdk_nvme_ctrlr ctrlr = {}; 1430 struct nvme_ctrlr *nvme_ctrlr = NULL; 1431 struct nvme_path_id *curr_trid; 1432 struct spdk_io_channel *ch1, *ch2; 1433 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1434 bool detect_remove; 1435 int rc; 1436 1437 ut_init_trid(&trid); 1438 TAILQ_INIT(&ctrlr.active_io_qpairs); 1439 1440 set_thread(0); 1441 1442 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1443 CU_ASSERT(rc == 0); 1444 1445 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1446 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1447 1448 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1449 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1450 1451 ch1 = spdk_get_io_channel(nvme_ctrlr); 1452 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1453 1454 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1455 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1456 1457 set_thread(1); 1458 1459 ch2 = spdk_get_io_channel(nvme_ctrlr); 1460 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1461 1462 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1463 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1464 1465 /* Reset starts from thread 1. */ 1466 set_thread(1); 1467 1468 /* Case 1: ctrlr is already being destructed. */ 1469 nvme_ctrlr->destruct = true; 1470 1471 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1472 CU_ASSERT(rc == -ENXIO); 1473 1474 /* Case 2: reset is in progress. */ 1475 nvme_ctrlr->destruct = false; 1476 nvme_ctrlr->resetting = true; 1477 1478 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1479 CU_ASSERT(rc == -EBUSY); 1480 1481 /* Case 3: reset completes successfully. */ 1482 nvme_ctrlr->resetting = false; 1483 curr_trid->last_failed_tsc = spdk_get_ticks(); 1484 ctrlr.is_failed = true; 1485 1486 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1487 CU_ASSERT(rc == 0); 1488 CU_ASSERT(nvme_ctrlr->resetting == true); 1489 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1490 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1491 1492 poll_thread_times(0, 3); 1493 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1494 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1495 1496 poll_thread_times(0, 1); 1497 poll_thread_times(1, 1); 1498 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1499 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1500 CU_ASSERT(ctrlr.is_failed == true); 1501 1502 poll_thread_times(1, 1); 1503 poll_thread_times(0, 1); 1504 CU_ASSERT(ctrlr.is_failed == false); 1505 CU_ASSERT(ctrlr.adminq.is_connected == false); 1506 1507 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1508 poll_thread_times(0, 2); 1509 CU_ASSERT(ctrlr.adminq.is_connected == true); 1510 1511 poll_thread_times(0, 1); 1512 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1513 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1514 1515 poll_thread_times(1, 1); 1516 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1517 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1518 CU_ASSERT(nvme_ctrlr->resetting == true); 1519 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1520 1521 poll_thread_times(0, 2); 1522 CU_ASSERT(nvme_ctrlr->resetting == true); 1523 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1524 poll_thread_times(1, 1); 1525 CU_ASSERT(nvme_ctrlr->resetting == true); 1526 poll_thread_times(0, 1); 1527 CU_ASSERT(nvme_ctrlr->resetting == false); 1528 1529 /* Case 4: ctrlr is already removed. */ 1530 ctrlr.is_removed = true; 1531 1532 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1533 CU_ASSERT(rc == 0); 1534 1535 detect_remove = false; 1536 nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset; 1537 nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove; 1538 1539 poll_threads(); 1540 1541 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL); 1542 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL); 1543 CU_ASSERT(detect_remove == true); 1544 1545 ctrlr.is_removed = false; 1546 1547 spdk_put_io_channel(ch2); 1548 1549 set_thread(0); 1550 1551 spdk_put_io_channel(ch1); 1552 1553 poll_threads(); 1554 1555 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1556 CU_ASSERT(rc == 0); 1557 1558 poll_threads(); 1559 spdk_delay_us(1000); 1560 poll_threads(); 1561 1562 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1563 } 1564 1565 static void 1566 test_race_between_reset_and_destruct_ctrlr(void) 1567 { 1568 struct spdk_nvme_transport_id trid = {}; 1569 struct spdk_nvme_ctrlr ctrlr = {}; 1570 struct nvme_ctrlr *nvme_ctrlr; 1571 struct spdk_io_channel *ch1, *ch2; 1572 int rc; 1573 1574 ut_init_trid(&trid); 1575 TAILQ_INIT(&ctrlr.active_io_qpairs); 1576 1577 set_thread(0); 1578 1579 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1580 CU_ASSERT(rc == 0); 1581 1582 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1583 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1584 1585 ch1 = spdk_get_io_channel(nvme_ctrlr); 1586 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1587 1588 set_thread(1); 1589 1590 ch2 = spdk_get_io_channel(nvme_ctrlr); 1591 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1592 1593 /* Reset starts from thread 1. */ 1594 set_thread(1); 1595 1596 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1597 CU_ASSERT(rc == 0); 1598 CU_ASSERT(nvme_ctrlr->resetting == true); 1599 1600 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1601 set_thread(0); 1602 1603 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1604 CU_ASSERT(rc == 0); 1605 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1606 CU_ASSERT(nvme_ctrlr->destruct == true); 1607 CU_ASSERT(nvme_ctrlr->resetting == true); 1608 1609 poll_threads(); 1610 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1611 poll_threads(); 1612 1613 /* Reset completed but ctrlr is not still destructed yet. */ 1614 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1615 CU_ASSERT(nvme_ctrlr->destruct == true); 1616 CU_ASSERT(nvme_ctrlr->resetting == false); 1617 1618 /* New reset request is rejected. */ 1619 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1620 CU_ASSERT(rc == -ENXIO); 1621 1622 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1623 * However there are two channels and destruct is not completed yet. 1624 */ 1625 poll_threads(); 1626 1627 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1628 1629 set_thread(0); 1630 1631 spdk_put_io_channel(ch1); 1632 1633 set_thread(1); 1634 1635 spdk_put_io_channel(ch2); 1636 1637 poll_threads(); 1638 spdk_delay_us(1000); 1639 poll_threads(); 1640 1641 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1642 } 1643 1644 static void 1645 test_failover_ctrlr(void) 1646 { 1647 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1648 struct spdk_nvme_ctrlr ctrlr = {}; 1649 struct nvme_ctrlr *nvme_ctrlr = NULL; 1650 struct nvme_path_id *curr_trid, *next_trid; 1651 struct spdk_io_channel *ch1, *ch2; 1652 int rc; 1653 1654 ut_init_trid(&trid1); 1655 ut_init_trid2(&trid2); 1656 TAILQ_INIT(&ctrlr.active_io_qpairs); 1657 1658 set_thread(0); 1659 1660 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1661 CU_ASSERT(rc == 0); 1662 1663 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1664 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1665 1666 ch1 = spdk_get_io_channel(nvme_ctrlr); 1667 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1668 1669 set_thread(1); 1670 1671 ch2 = spdk_get_io_channel(nvme_ctrlr); 1672 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1673 1674 /* First, test one trid case. */ 1675 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1676 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1677 1678 /* Failover starts from thread 1. */ 1679 set_thread(1); 1680 1681 /* Case 1: ctrlr is already being destructed. */ 1682 nvme_ctrlr->destruct = true; 1683 1684 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1685 CU_ASSERT(rc == -ENXIO); 1686 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1687 1688 /* Case 2: reset is in progress. */ 1689 nvme_ctrlr->destruct = false; 1690 nvme_ctrlr->resetting = true; 1691 1692 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1693 CU_ASSERT(rc == -EINPROGRESS); 1694 1695 /* Case 3: reset completes successfully. */ 1696 nvme_ctrlr->resetting = false; 1697 1698 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1699 CU_ASSERT(rc == 0); 1700 1701 CU_ASSERT(nvme_ctrlr->resetting == true); 1702 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1703 1704 poll_threads(); 1705 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1706 poll_threads(); 1707 1708 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1709 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1710 1711 CU_ASSERT(nvme_ctrlr->resetting == false); 1712 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1713 1714 set_thread(0); 1715 1716 /* Second, test two trids case. */ 1717 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1718 CU_ASSERT(rc == 0); 1719 1720 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1721 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1722 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1723 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1724 1725 /* Failover starts from thread 1. */ 1726 set_thread(1); 1727 1728 /* Case 4: reset is in progress. */ 1729 nvme_ctrlr->resetting = true; 1730 1731 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1732 CU_ASSERT(rc == -EINPROGRESS); 1733 1734 /* Case 5: failover completes successfully. */ 1735 nvme_ctrlr->resetting = false; 1736 1737 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1738 CU_ASSERT(rc == 0); 1739 1740 CU_ASSERT(nvme_ctrlr->resetting == true); 1741 1742 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1743 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1744 CU_ASSERT(next_trid != curr_trid); 1745 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1746 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1747 1748 poll_threads(); 1749 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1750 poll_threads(); 1751 1752 CU_ASSERT(nvme_ctrlr->resetting == false); 1753 1754 spdk_put_io_channel(ch2); 1755 1756 set_thread(0); 1757 1758 spdk_put_io_channel(ch1); 1759 1760 poll_threads(); 1761 1762 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1763 CU_ASSERT(rc == 0); 1764 1765 poll_threads(); 1766 spdk_delay_us(1000); 1767 poll_threads(); 1768 1769 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1770 } 1771 1772 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1773 * 1774 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1775 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1776 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1777 * have been active, i.e., the head of the list until the failover completed. 1778 * However trid3 was inserted to the head of the list by mistake. 1779 * 1780 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1781 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1782 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1783 * may be executed repeatedly before failover is executed. Hence this bug is real. 1784 * 1785 * The following test verifies the fix. 1786 */ 1787 static void 1788 test_race_between_failover_and_add_secondary_trid(void) 1789 { 1790 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1791 struct spdk_nvme_ctrlr ctrlr = {}; 1792 struct nvme_ctrlr *nvme_ctrlr = NULL; 1793 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1794 struct spdk_io_channel *ch1, *ch2; 1795 int rc; 1796 1797 ut_init_trid(&trid1); 1798 ut_init_trid2(&trid2); 1799 ut_init_trid3(&trid3); 1800 TAILQ_INIT(&ctrlr.active_io_qpairs); 1801 1802 set_thread(0); 1803 1804 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1805 CU_ASSERT(rc == 0); 1806 1807 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1808 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1809 1810 ch1 = spdk_get_io_channel(nvme_ctrlr); 1811 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1812 1813 set_thread(1); 1814 1815 ch2 = spdk_get_io_channel(nvme_ctrlr); 1816 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1817 1818 set_thread(0); 1819 1820 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1821 CU_ASSERT(rc == 0); 1822 1823 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1824 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1825 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1826 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1827 path_id2 = TAILQ_NEXT(path_id1, link); 1828 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1829 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1830 1831 ctrlr.fail_reset = true; 1832 1833 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1834 CU_ASSERT(rc == 0); 1835 1836 poll_threads(); 1837 1838 CU_ASSERT(path_id1->last_failed_tsc != 0); 1839 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1840 1841 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1842 CU_ASSERT(rc == 0); 1843 1844 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1845 CU_ASSERT(rc == 0); 1846 1847 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1848 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1849 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1850 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1851 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1852 path_id3 = TAILQ_NEXT(path_id2, link); 1853 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1854 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1855 1856 poll_threads(); 1857 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1858 poll_threads(); 1859 1860 spdk_put_io_channel(ch1); 1861 1862 set_thread(1); 1863 1864 spdk_put_io_channel(ch2); 1865 1866 poll_threads(); 1867 1868 set_thread(0); 1869 1870 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1871 CU_ASSERT(rc == 0); 1872 1873 poll_threads(); 1874 spdk_delay_us(1000); 1875 poll_threads(); 1876 1877 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1878 } 1879 1880 static void 1881 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1882 { 1883 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1884 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1885 } 1886 1887 static void 1888 test_pending_reset(void) 1889 { 1890 struct spdk_nvme_transport_id trid = {}; 1891 struct spdk_nvme_ctrlr *ctrlr; 1892 struct nvme_ctrlr *nvme_ctrlr = NULL; 1893 const int STRING_SIZE = 32; 1894 const char *attached_names[STRING_SIZE]; 1895 struct nvme_bdev *bdev; 1896 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1897 struct spdk_io_channel *ch1, *ch2; 1898 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1899 struct nvme_io_path *io_path1, *io_path2; 1900 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1901 int rc; 1902 1903 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1904 ut_init_trid(&trid); 1905 1906 set_thread(0); 1907 1908 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1909 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1910 1911 g_ut_attach_ctrlr_status = 0; 1912 g_ut_attach_bdev_count = 1; 1913 1914 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1915 attach_ctrlr_done, NULL, NULL, NULL, false); 1916 CU_ASSERT(rc == 0); 1917 1918 spdk_delay_us(1000); 1919 poll_threads(); 1920 1921 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1922 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1923 1924 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1925 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1926 1927 ch1 = spdk_get_io_channel(bdev); 1928 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1929 1930 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1931 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1932 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1933 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1934 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1935 1936 set_thread(1); 1937 1938 ch2 = spdk_get_io_channel(bdev); 1939 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1940 1941 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1942 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1943 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1944 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1945 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1946 1947 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1948 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1949 1950 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1951 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1952 1953 /* The first reset request is submitted on thread 1, and the second reset request 1954 * is submitted on thread 0 while processing the first request. 1955 */ 1956 bdev_nvme_submit_request(ch2, first_bdev_io); 1957 CU_ASSERT(nvme_ctrlr->resetting == true); 1958 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1959 1960 set_thread(0); 1961 1962 bdev_nvme_submit_request(ch1, second_bdev_io); 1963 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1964 1965 poll_threads(); 1966 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1967 poll_threads(); 1968 1969 CU_ASSERT(nvme_ctrlr->resetting == false); 1970 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1971 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1972 1973 /* The first reset request is submitted on thread 1, and the second reset request 1974 * is submitted on thread 0 while processing the first request. 1975 * 1976 * The difference from the above scenario is that the controller is removed while 1977 * processing the first request. Hence both reset requests should fail. 1978 */ 1979 set_thread(1); 1980 1981 bdev_nvme_submit_request(ch2, first_bdev_io); 1982 CU_ASSERT(nvme_ctrlr->resetting == true); 1983 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1984 1985 set_thread(0); 1986 1987 bdev_nvme_submit_request(ch1, second_bdev_io); 1988 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1989 1990 ctrlr->fail_reset = true; 1991 1992 poll_threads(); 1993 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1994 poll_threads(); 1995 1996 CU_ASSERT(nvme_ctrlr->resetting == false); 1997 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1998 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1999 2000 spdk_put_io_channel(ch1); 2001 2002 set_thread(1); 2003 2004 spdk_put_io_channel(ch2); 2005 2006 poll_threads(); 2007 2008 set_thread(0); 2009 2010 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2011 CU_ASSERT(rc == 0); 2012 2013 poll_threads(); 2014 spdk_delay_us(1000); 2015 poll_threads(); 2016 2017 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2018 2019 free(first_bdev_io); 2020 free(second_bdev_io); 2021 } 2022 2023 static void 2024 test_attach_ctrlr(void) 2025 { 2026 struct spdk_nvme_transport_id trid = {}; 2027 struct spdk_nvme_ctrlr *ctrlr; 2028 struct nvme_ctrlr *nvme_ctrlr; 2029 const int STRING_SIZE = 32; 2030 const char *attached_names[STRING_SIZE]; 2031 struct nvme_bdev *nbdev; 2032 int rc; 2033 2034 set_thread(0); 2035 2036 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2037 ut_init_trid(&trid); 2038 2039 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2040 * by probe polling. 2041 */ 2042 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2043 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2044 2045 ctrlr->is_failed = true; 2046 g_ut_attach_ctrlr_status = -EIO; 2047 g_ut_attach_bdev_count = 0; 2048 2049 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2050 attach_ctrlr_done, NULL, NULL, NULL, false); 2051 CU_ASSERT(rc == 0); 2052 2053 spdk_delay_us(1000); 2054 poll_threads(); 2055 2056 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2057 2058 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2059 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2060 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2061 2062 g_ut_attach_ctrlr_status = 0; 2063 2064 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2065 attach_ctrlr_done, NULL, NULL, NULL, false); 2066 CU_ASSERT(rc == 0); 2067 2068 spdk_delay_us(1000); 2069 poll_threads(); 2070 2071 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2072 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2073 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2074 2075 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2076 CU_ASSERT(rc == 0); 2077 2078 poll_threads(); 2079 spdk_delay_us(1000); 2080 poll_threads(); 2081 2082 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2083 2084 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2085 * one nvme_bdev is created. 2086 */ 2087 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2088 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2089 2090 g_ut_attach_bdev_count = 1; 2091 2092 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2093 attach_ctrlr_done, NULL, NULL, NULL, false); 2094 CU_ASSERT(rc == 0); 2095 2096 spdk_delay_us(1000); 2097 poll_threads(); 2098 2099 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2100 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2101 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2102 2103 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2104 attached_names[0] = NULL; 2105 2106 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2107 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2108 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2109 2110 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2111 CU_ASSERT(rc == 0); 2112 2113 poll_threads(); 2114 spdk_delay_us(1000); 2115 poll_threads(); 2116 2117 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2118 2119 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2120 * created because creating one nvme_bdev failed. 2121 */ 2122 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2123 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2124 2125 g_ut_register_bdev_status = -EINVAL; 2126 g_ut_attach_bdev_count = 0; 2127 2128 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2129 attach_ctrlr_done, NULL, NULL, NULL, false); 2130 CU_ASSERT(rc == 0); 2131 2132 spdk_delay_us(1000); 2133 poll_threads(); 2134 2135 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2136 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2137 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2138 2139 CU_ASSERT(attached_names[0] == NULL); 2140 2141 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2142 CU_ASSERT(rc == 0); 2143 2144 poll_threads(); 2145 spdk_delay_us(1000); 2146 poll_threads(); 2147 2148 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2149 2150 g_ut_register_bdev_status = 0; 2151 } 2152 2153 static void 2154 test_aer_cb(void) 2155 { 2156 struct spdk_nvme_transport_id trid = {}; 2157 struct spdk_nvme_ctrlr *ctrlr; 2158 struct nvme_ctrlr *nvme_ctrlr; 2159 struct nvme_bdev *bdev; 2160 const int STRING_SIZE = 32; 2161 const char *attached_names[STRING_SIZE]; 2162 union spdk_nvme_async_event_completion event = {}; 2163 struct spdk_nvme_cpl cpl = {}; 2164 int rc; 2165 2166 set_thread(0); 2167 2168 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2169 ut_init_trid(&trid); 2170 2171 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2172 * namespaces are populated. 2173 */ 2174 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2175 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2176 2177 ctrlr->ns[0].is_active = false; 2178 2179 g_ut_attach_ctrlr_status = 0; 2180 g_ut_attach_bdev_count = 3; 2181 2182 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2183 attach_ctrlr_done, NULL, NULL, NULL, false); 2184 CU_ASSERT(rc == 0); 2185 2186 spdk_delay_us(1000); 2187 poll_threads(); 2188 2189 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2190 poll_threads(); 2191 2192 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2193 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2194 2195 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2196 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2197 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2198 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2199 2200 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2201 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2202 CU_ASSERT(bdev->disk.blockcnt == 1024); 2203 2204 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2205 * change the size of the 4th namespace. 2206 */ 2207 ctrlr->ns[0].is_active = true; 2208 ctrlr->ns[2].is_active = false; 2209 ctrlr->nsdata[3].nsze = 2048; 2210 2211 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2212 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2213 cpl.cdw0 = event.raw; 2214 2215 aer_cb(nvme_ctrlr, &cpl); 2216 2217 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2218 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2219 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2220 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2221 CU_ASSERT(bdev->disk.blockcnt == 2048); 2222 2223 /* Change ANA state of active namespaces. */ 2224 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2225 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2226 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2227 2228 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2229 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2230 cpl.cdw0 = event.raw; 2231 2232 aer_cb(nvme_ctrlr, &cpl); 2233 2234 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2235 poll_threads(); 2236 2237 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2238 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2239 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2240 2241 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2242 CU_ASSERT(rc == 0); 2243 2244 poll_threads(); 2245 spdk_delay_us(1000); 2246 poll_threads(); 2247 2248 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2249 } 2250 2251 static void 2252 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2253 enum spdk_bdev_io_type io_type) 2254 { 2255 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2256 struct nvme_io_path *io_path; 2257 struct spdk_nvme_qpair *qpair; 2258 2259 io_path = bdev_nvme_find_io_path(nbdev_ch); 2260 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2261 qpair = io_path->qpair->qpair; 2262 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2263 2264 bdev_io->type = io_type; 2265 bdev_io->internal.in_submit_request = true; 2266 2267 bdev_nvme_submit_request(ch, bdev_io); 2268 2269 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2270 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2271 2272 poll_threads(); 2273 2274 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2275 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2276 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2277 } 2278 2279 static void 2280 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2281 enum spdk_bdev_io_type io_type) 2282 { 2283 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2284 struct nvme_io_path *io_path; 2285 struct spdk_nvme_qpair *qpair; 2286 2287 io_path = bdev_nvme_find_io_path(nbdev_ch); 2288 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2289 qpair = io_path->qpair->qpair; 2290 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2291 2292 bdev_io->type = io_type; 2293 bdev_io->internal.in_submit_request = true; 2294 2295 bdev_nvme_submit_request(ch, bdev_io); 2296 2297 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2298 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2299 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2300 } 2301 2302 static void 2303 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2304 { 2305 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2306 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2307 struct ut_nvme_req *req; 2308 struct nvme_io_path *io_path; 2309 struct spdk_nvme_qpair *qpair; 2310 2311 io_path = bdev_nvme_find_io_path(nbdev_ch); 2312 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2313 qpair = io_path->qpair->qpair; 2314 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2315 2316 /* Only compare and write now. */ 2317 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2318 bdev_io->internal.in_submit_request = true; 2319 2320 bdev_nvme_submit_request(ch, bdev_io); 2321 2322 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2323 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2324 CU_ASSERT(bio->first_fused_submitted == true); 2325 2326 /* First outstanding request is compare operation. */ 2327 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2328 SPDK_CU_ASSERT_FATAL(req != NULL); 2329 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2330 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2331 2332 poll_threads(); 2333 2334 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2335 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2336 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2337 } 2338 2339 static void 2340 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2341 struct spdk_nvme_ctrlr *ctrlr) 2342 { 2343 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2344 bdev_io->internal.in_submit_request = true; 2345 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2346 2347 bdev_nvme_submit_request(ch, bdev_io); 2348 2349 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2350 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2351 2352 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2353 poll_thread_times(1, 1); 2354 2355 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2356 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2357 2358 poll_thread_times(0, 1); 2359 2360 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2361 } 2362 2363 static void 2364 test_submit_nvme_cmd(void) 2365 { 2366 struct spdk_nvme_transport_id trid = {}; 2367 struct spdk_nvme_ctrlr *ctrlr; 2368 struct nvme_ctrlr *nvme_ctrlr; 2369 const int STRING_SIZE = 32; 2370 const char *attached_names[STRING_SIZE]; 2371 struct nvme_bdev *bdev; 2372 struct spdk_bdev_io *bdev_io; 2373 struct spdk_io_channel *ch; 2374 int rc; 2375 2376 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2377 ut_init_trid(&trid); 2378 2379 set_thread(1); 2380 2381 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2382 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2383 2384 g_ut_attach_ctrlr_status = 0; 2385 g_ut_attach_bdev_count = 1; 2386 2387 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2388 attach_ctrlr_done, NULL, NULL, NULL, false); 2389 CU_ASSERT(rc == 0); 2390 2391 spdk_delay_us(1000); 2392 poll_threads(); 2393 2394 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2395 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2396 2397 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2398 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2399 2400 set_thread(0); 2401 2402 ch = spdk_get_io_channel(bdev); 2403 SPDK_CU_ASSERT_FATAL(ch != NULL); 2404 2405 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2406 2407 bdev_io->u.bdev.iovs = NULL; 2408 2409 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2410 2411 ut_bdev_io_set_buf(bdev_io); 2412 2413 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2414 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2415 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2416 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2417 2418 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2419 2420 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2421 2422 /* Verify that ext NVME API is called when data is described by memory domain */ 2423 g_ut_read_ext_called = false; 2424 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2425 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2426 CU_ASSERT(g_ut_read_ext_called == true); 2427 g_ut_read_ext_called = false; 2428 bdev_io->u.bdev.memory_domain = NULL; 2429 2430 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2431 2432 free(bdev_io); 2433 2434 spdk_put_io_channel(ch); 2435 2436 poll_threads(); 2437 2438 set_thread(1); 2439 2440 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2441 CU_ASSERT(rc == 0); 2442 2443 poll_threads(); 2444 spdk_delay_us(1000); 2445 poll_threads(); 2446 2447 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2448 } 2449 2450 static void 2451 test_add_remove_trid(void) 2452 { 2453 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2454 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2455 struct nvme_ctrlr *nvme_ctrlr = NULL; 2456 const int STRING_SIZE = 32; 2457 const char *attached_names[STRING_SIZE]; 2458 struct nvme_path_id *ctrid; 2459 int rc; 2460 2461 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2462 ut_init_trid(&path1.trid); 2463 ut_init_trid2(&path2.trid); 2464 ut_init_trid3(&path3.trid); 2465 2466 set_thread(0); 2467 2468 g_ut_attach_ctrlr_status = 0; 2469 g_ut_attach_bdev_count = 0; 2470 2471 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2472 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2473 2474 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2475 attach_ctrlr_done, NULL, NULL, NULL, false); 2476 CU_ASSERT(rc == 0); 2477 2478 spdk_delay_us(1000); 2479 poll_threads(); 2480 2481 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2482 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2483 2484 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2485 2486 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2487 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2488 2489 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2490 attach_ctrlr_done, NULL, NULL, NULL, false); 2491 CU_ASSERT(rc == 0); 2492 2493 spdk_delay_us(1000); 2494 poll_threads(); 2495 2496 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2497 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2498 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2499 break; 2500 } 2501 } 2502 CU_ASSERT(ctrid != NULL); 2503 2504 /* trid3 is not in the registered list. */ 2505 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2506 CU_ASSERT(rc == -ENXIO); 2507 2508 /* trid2 is not used, and simply removed. */ 2509 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2510 CU_ASSERT(rc == 0); 2511 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2512 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2513 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2514 } 2515 2516 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2517 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2518 2519 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2520 attach_ctrlr_done, NULL, NULL, NULL, false); 2521 CU_ASSERT(rc == 0); 2522 2523 spdk_delay_us(1000); 2524 poll_threads(); 2525 2526 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2527 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2528 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2529 break; 2530 } 2531 } 2532 CU_ASSERT(ctrid != NULL); 2533 2534 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2535 * If we add path2 again, path2 should be inserted between path1 and path3. 2536 * Then, we remove path2. It is not used, and simply removed. 2537 */ 2538 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2539 2540 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2541 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2542 2543 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2544 attach_ctrlr_done, NULL, NULL, NULL, false); 2545 CU_ASSERT(rc == 0); 2546 2547 spdk_delay_us(1000); 2548 poll_threads(); 2549 2550 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2551 2552 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2553 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2554 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2555 2556 ctrid = TAILQ_NEXT(ctrid, link); 2557 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2558 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2559 2560 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2561 CU_ASSERT(rc == 0); 2562 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2563 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2564 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2565 } 2566 2567 /* path1 is currently used and path3 is an alternative path. 2568 * If we remove path1, path is changed to path3. 2569 */ 2570 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2571 CU_ASSERT(rc == 0); 2572 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2573 CU_ASSERT(nvme_ctrlr->resetting == true); 2574 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2575 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2576 } 2577 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2578 2579 poll_threads(); 2580 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2581 poll_threads(); 2582 2583 CU_ASSERT(nvme_ctrlr->resetting == false); 2584 2585 /* path3 is the current and only path. If we remove path3, the corresponding 2586 * nvme_ctrlr is removed. 2587 */ 2588 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2589 CU_ASSERT(rc == 0); 2590 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2591 2592 poll_threads(); 2593 spdk_delay_us(1000); 2594 poll_threads(); 2595 2596 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2597 2598 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2599 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2600 2601 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2602 attach_ctrlr_done, NULL, NULL, NULL, false); 2603 CU_ASSERT(rc == 0); 2604 2605 spdk_delay_us(1000); 2606 poll_threads(); 2607 2608 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2609 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2610 2611 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2612 2613 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2614 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2615 2616 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2617 attach_ctrlr_done, NULL, NULL, NULL, false); 2618 CU_ASSERT(rc == 0); 2619 2620 spdk_delay_us(1000); 2621 poll_threads(); 2622 2623 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2624 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2625 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2626 break; 2627 } 2628 } 2629 CU_ASSERT(ctrid != NULL); 2630 2631 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2632 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2633 CU_ASSERT(rc == 0); 2634 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2635 2636 poll_threads(); 2637 spdk_delay_us(1000); 2638 poll_threads(); 2639 2640 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2641 } 2642 2643 static void 2644 test_abort(void) 2645 { 2646 struct spdk_nvme_transport_id trid = {}; 2647 struct nvme_ctrlr_opts opts = {}; 2648 struct spdk_nvme_ctrlr *ctrlr; 2649 struct nvme_ctrlr *nvme_ctrlr; 2650 const int STRING_SIZE = 32; 2651 const char *attached_names[STRING_SIZE]; 2652 struct nvme_bdev *bdev; 2653 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2654 struct spdk_io_channel *ch1, *ch2; 2655 struct nvme_bdev_channel *nbdev_ch1; 2656 struct nvme_io_path *io_path1; 2657 struct nvme_qpair *nvme_qpair1; 2658 int rc; 2659 2660 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2661 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2662 * are submitted on thread 1. Both should succeed. 2663 */ 2664 2665 ut_init_trid(&trid); 2666 2667 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2668 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2669 2670 g_ut_attach_ctrlr_status = 0; 2671 g_ut_attach_bdev_count = 1; 2672 2673 set_thread(1); 2674 2675 opts.ctrlr_loss_timeout_sec = -1; 2676 opts.reconnect_delay_sec = 1; 2677 2678 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2679 attach_ctrlr_done, NULL, NULL, &opts, false); 2680 CU_ASSERT(rc == 0); 2681 2682 spdk_delay_us(1000); 2683 poll_threads(); 2684 2685 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2686 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2687 2688 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2689 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2690 2691 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2692 ut_bdev_io_set_buf(write_io); 2693 2694 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2695 ut_bdev_io_set_buf(fuse_io); 2696 2697 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2698 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2699 2700 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2701 2702 set_thread(0); 2703 2704 ch1 = spdk_get_io_channel(bdev); 2705 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2706 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2707 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2708 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2709 nvme_qpair1 = io_path1->qpair; 2710 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2711 2712 set_thread(1); 2713 2714 ch2 = spdk_get_io_channel(bdev); 2715 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2716 2717 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2718 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2719 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2720 2721 /* Aborting the already completed request should fail. */ 2722 write_io->internal.in_submit_request = true; 2723 bdev_nvme_submit_request(ch1, write_io); 2724 poll_threads(); 2725 2726 CU_ASSERT(write_io->internal.in_submit_request == false); 2727 2728 abort_io->u.abort.bio_to_abort = write_io; 2729 abort_io->internal.in_submit_request = true; 2730 2731 bdev_nvme_submit_request(ch1, abort_io); 2732 2733 poll_threads(); 2734 2735 CU_ASSERT(abort_io->internal.in_submit_request == false); 2736 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2737 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2738 2739 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2740 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2741 2742 admin_io->internal.in_submit_request = true; 2743 bdev_nvme_submit_request(ch1, admin_io); 2744 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2745 poll_threads(); 2746 2747 CU_ASSERT(admin_io->internal.in_submit_request == false); 2748 2749 abort_io->u.abort.bio_to_abort = admin_io; 2750 abort_io->internal.in_submit_request = true; 2751 2752 bdev_nvme_submit_request(ch2, abort_io); 2753 2754 poll_threads(); 2755 2756 CU_ASSERT(abort_io->internal.in_submit_request == false); 2757 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2758 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2759 2760 /* Aborting the write request should succeed. */ 2761 write_io->internal.in_submit_request = true; 2762 bdev_nvme_submit_request(ch1, write_io); 2763 2764 CU_ASSERT(write_io->internal.in_submit_request == true); 2765 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2766 2767 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2768 abort_io->u.abort.bio_to_abort = write_io; 2769 abort_io->internal.in_submit_request = true; 2770 2771 bdev_nvme_submit_request(ch1, abort_io); 2772 2773 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2774 poll_threads(); 2775 2776 CU_ASSERT(abort_io->internal.in_submit_request == false); 2777 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2778 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2779 CU_ASSERT(write_io->internal.in_submit_request == false); 2780 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2781 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2782 2783 /* Aborting the fuse request should succeed. */ 2784 fuse_io->internal.in_submit_request = true; 2785 bdev_nvme_submit_request(ch1, fuse_io); 2786 2787 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2788 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2789 2790 abort_io->u.abort.bio_to_abort = fuse_io; 2791 abort_io->internal.in_submit_request = true; 2792 2793 bdev_nvme_submit_request(ch1, abort_io); 2794 2795 spdk_delay_us(10000); 2796 poll_threads(); 2797 2798 CU_ASSERT(abort_io->internal.in_submit_request == false); 2799 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2800 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2801 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2802 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2803 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2804 2805 /* Aborting the admin request should succeed. */ 2806 admin_io->internal.in_submit_request = true; 2807 bdev_nvme_submit_request(ch1, admin_io); 2808 2809 CU_ASSERT(admin_io->internal.in_submit_request == true); 2810 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2811 2812 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2813 abort_io->u.abort.bio_to_abort = admin_io; 2814 abort_io->internal.in_submit_request = true; 2815 2816 bdev_nvme_submit_request(ch2, abort_io); 2817 2818 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2819 poll_threads(); 2820 2821 CU_ASSERT(abort_io->internal.in_submit_request == false); 2822 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2823 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2824 CU_ASSERT(admin_io->internal.in_submit_request == false); 2825 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2826 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2827 2828 set_thread(0); 2829 2830 /* If qpair is disconnected, it is freed and then reconnected via resetting 2831 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2832 * while resetting the nvme_ctrlr. 2833 */ 2834 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2835 2836 poll_thread_times(0, 3); 2837 2838 CU_ASSERT(nvme_qpair1->qpair == NULL); 2839 CU_ASSERT(nvme_ctrlr->resetting == true); 2840 2841 write_io->internal.in_submit_request = true; 2842 2843 bdev_nvme_submit_request(ch1, write_io); 2844 2845 CU_ASSERT(write_io->internal.in_submit_request == true); 2846 CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list))); 2847 2848 /* Aborting the queued write request should succeed immediately. */ 2849 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2850 abort_io->u.abort.bio_to_abort = write_io; 2851 abort_io->internal.in_submit_request = true; 2852 2853 bdev_nvme_submit_request(ch1, abort_io); 2854 2855 CU_ASSERT(abort_io->internal.in_submit_request == false); 2856 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2857 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2858 CU_ASSERT(write_io->internal.in_submit_request == false); 2859 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2860 2861 poll_threads(); 2862 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2863 poll_threads(); 2864 2865 spdk_put_io_channel(ch1); 2866 2867 set_thread(1); 2868 2869 spdk_put_io_channel(ch2); 2870 2871 poll_threads(); 2872 2873 free(write_io); 2874 free(fuse_io); 2875 free(admin_io); 2876 free(abort_io); 2877 2878 set_thread(1); 2879 2880 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2881 CU_ASSERT(rc == 0); 2882 2883 poll_threads(); 2884 spdk_delay_us(1000); 2885 poll_threads(); 2886 2887 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2888 } 2889 2890 static void 2891 test_get_io_qpair(void) 2892 { 2893 struct spdk_nvme_transport_id trid = {}; 2894 struct spdk_nvme_ctrlr ctrlr = {}; 2895 struct nvme_ctrlr *nvme_ctrlr = NULL; 2896 struct spdk_io_channel *ch; 2897 struct nvme_ctrlr_channel *ctrlr_ch; 2898 struct spdk_nvme_qpair *qpair; 2899 int rc; 2900 2901 ut_init_trid(&trid); 2902 TAILQ_INIT(&ctrlr.active_io_qpairs); 2903 2904 set_thread(0); 2905 2906 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2907 CU_ASSERT(rc == 0); 2908 2909 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2910 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2911 2912 ch = spdk_get_io_channel(nvme_ctrlr); 2913 SPDK_CU_ASSERT_FATAL(ch != NULL); 2914 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2915 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2916 2917 qpair = bdev_nvme_get_io_qpair(ch); 2918 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2919 2920 spdk_put_io_channel(ch); 2921 2922 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2923 CU_ASSERT(rc == 0); 2924 2925 poll_threads(); 2926 spdk_delay_us(1000); 2927 poll_threads(); 2928 2929 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2930 } 2931 2932 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2933 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2934 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2935 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2936 */ 2937 static void 2938 test_bdev_unregister(void) 2939 { 2940 struct spdk_nvme_transport_id trid = {}; 2941 struct spdk_nvme_ctrlr *ctrlr; 2942 struct nvme_ctrlr *nvme_ctrlr; 2943 struct nvme_ns *nvme_ns1, *nvme_ns2; 2944 const int STRING_SIZE = 32; 2945 const char *attached_names[STRING_SIZE]; 2946 struct nvme_bdev *bdev1, *bdev2; 2947 int rc; 2948 2949 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2950 ut_init_trid(&trid); 2951 2952 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2953 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2954 2955 g_ut_attach_ctrlr_status = 0; 2956 g_ut_attach_bdev_count = 2; 2957 2958 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2959 attach_ctrlr_done, NULL, NULL, NULL, false); 2960 CU_ASSERT(rc == 0); 2961 2962 spdk_delay_us(1000); 2963 poll_threads(); 2964 2965 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2966 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2967 2968 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2969 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2970 2971 bdev1 = nvme_ns1->bdev; 2972 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2973 2974 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2975 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2976 2977 bdev2 = nvme_ns2->bdev; 2978 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2979 2980 bdev_nvme_destruct(&bdev1->disk); 2981 bdev_nvme_destruct(&bdev2->disk); 2982 2983 poll_threads(); 2984 2985 CU_ASSERT(nvme_ns1->bdev == NULL); 2986 CU_ASSERT(nvme_ns2->bdev == NULL); 2987 2988 nvme_ctrlr->destruct = true; 2989 _nvme_ctrlr_destruct(nvme_ctrlr); 2990 2991 poll_threads(); 2992 spdk_delay_us(1000); 2993 poll_threads(); 2994 2995 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2996 } 2997 2998 static void 2999 test_compare_ns(void) 3000 { 3001 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 3002 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 3003 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 3004 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3005 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3006 3007 /* No IDs are defined. */ 3008 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3009 3010 /* Only EUI64 are defined and not matched. */ 3011 nsdata1.eui64 = 0xABCDEF0123456789; 3012 nsdata2.eui64 = 0xBBCDEF0123456789; 3013 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3014 3015 /* Only EUI64 are defined and matched. */ 3016 nsdata2.eui64 = 0xABCDEF0123456789; 3017 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3018 3019 /* Only NGUID are defined and not matched. */ 3020 nsdata1.eui64 = 0x0; 3021 nsdata2.eui64 = 0x0; 3022 nsdata1.nguid[0] = 0x12; 3023 nsdata2.nguid[0] = 0x10; 3024 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3025 3026 /* Only NGUID are defined and matched. */ 3027 nsdata2.nguid[0] = 0x12; 3028 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3029 3030 /* Only UUID are defined and not matched. */ 3031 nsdata1.nguid[0] = 0x0; 3032 nsdata2.nguid[0] = 0x0; 3033 ns1.uuid = &uuid1; 3034 ns2.uuid = &uuid2; 3035 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3036 3037 /* Only one UUID is defined. */ 3038 ns1.uuid = NULL; 3039 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3040 3041 /* Only UUID are defined and matched. */ 3042 ns1.uuid = &uuid2; 3043 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3044 3045 /* All EUI64, NGUID, and UUID are defined and matched. */ 3046 nsdata1.eui64 = 0x123456789ABCDEF; 3047 nsdata2.eui64 = 0x123456789ABCDEF; 3048 nsdata1.nguid[15] = 0x34; 3049 nsdata2.nguid[15] = 0x34; 3050 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3051 3052 /* CSI are not matched. */ 3053 ns1.csi = SPDK_NVME_CSI_ZNS; 3054 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3055 } 3056 3057 static void 3058 test_init_ana_log_page(void) 3059 { 3060 struct spdk_nvme_transport_id trid = {}; 3061 struct spdk_nvme_ctrlr *ctrlr; 3062 struct nvme_ctrlr *nvme_ctrlr; 3063 const int STRING_SIZE = 32; 3064 const char *attached_names[STRING_SIZE]; 3065 int rc; 3066 3067 set_thread(0); 3068 3069 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3070 ut_init_trid(&trid); 3071 3072 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3073 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3074 3075 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3076 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3077 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3078 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3079 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3080 3081 g_ut_attach_ctrlr_status = 0; 3082 g_ut_attach_bdev_count = 5; 3083 3084 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3085 attach_ctrlr_done, NULL, NULL, NULL, false); 3086 CU_ASSERT(rc == 0); 3087 3088 spdk_delay_us(1000); 3089 poll_threads(); 3090 3091 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3092 poll_threads(); 3093 3094 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3095 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3096 3097 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3098 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3099 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3100 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3101 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3102 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3103 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3104 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3105 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3106 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3107 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3108 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3109 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3110 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3111 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3112 3113 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3114 CU_ASSERT(rc == 0); 3115 3116 poll_threads(); 3117 spdk_delay_us(1000); 3118 poll_threads(); 3119 3120 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3121 } 3122 3123 static void 3124 init_accel(void) 3125 { 3126 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3127 sizeof(int), "accel_p"); 3128 } 3129 3130 static void 3131 fini_accel(void) 3132 { 3133 spdk_io_device_unregister(g_accel_p, NULL); 3134 } 3135 3136 static void 3137 test_get_memory_domains(void) 3138 { 3139 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3140 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3141 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3142 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3143 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3144 struct spdk_memory_domain *domains[4] = {}; 3145 int rc = 0; 3146 3147 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3148 3149 /* nvme controller doesn't have memory domains */ 3150 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3151 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3152 CU_ASSERT(rc == 0); 3153 CU_ASSERT(domains[0] == NULL); 3154 CU_ASSERT(domains[1] == NULL); 3155 3156 /* nvme controller has a memory domain */ 3157 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3158 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3159 CU_ASSERT(rc == 1); 3160 CU_ASSERT(domains[0] != NULL); 3161 memset(domains, 0, sizeof(domains)); 3162 3163 /* multipath, 2 controllers report 1 memory domain each */ 3164 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3165 3166 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3167 CU_ASSERT(rc == 2); 3168 CU_ASSERT(domains[0] != NULL); 3169 CU_ASSERT(domains[1] != NULL); 3170 memset(domains, 0, sizeof(domains)); 3171 3172 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3173 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3174 CU_ASSERT(rc == 2); 3175 3176 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3177 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3178 CU_ASSERT(rc == 2); 3179 CU_ASSERT(domains[0] == NULL); 3180 CU_ASSERT(domains[1] == NULL); 3181 3182 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3183 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3184 CU_ASSERT(rc == 2); 3185 CU_ASSERT(domains[0] != NULL); 3186 CU_ASSERT(domains[1] == NULL); 3187 memset(domains, 0, sizeof(domains)); 3188 3189 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3190 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3191 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3192 CU_ASSERT(rc == 4); 3193 CU_ASSERT(domains[0] != NULL); 3194 CU_ASSERT(domains[1] != NULL); 3195 CU_ASSERT(domains[2] != NULL); 3196 CU_ASSERT(domains[3] != NULL); 3197 memset(domains, 0, sizeof(domains)); 3198 3199 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3200 * Array size is less than the number of memory domains */ 3201 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3202 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3203 CU_ASSERT(rc == 4); 3204 CU_ASSERT(domains[0] != NULL); 3205 CU_ASSERT(domains[1] != NULL); 3206 CU_ASSERT(domains[2] != NULL); 3207 CU_ASSERT(domains[3] == NULL); 3208 memset(domains, 0, sizeof(domains)); 3209 3210 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3211 } 3212 3213 static void 3214 test_reconnect_qpair(void) 3215 { 3216 struct spdk_nvme_transport_id trid = {}; 3217 struct spdk_nvme_ctrlr *ctrlr; 3218 struct nvme_ctrlr *nvme_ctrlr; 3219 const int STRING_SIZE = 32; 3220 const char *attached_names[STRING_SIZE]; 3221 struct nvme_bdev *bdev; 3222 struct spdk_io_channel *ch1, *ch2; 3223 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3224 struct nvme_io_path *io_path1, *io_path2; 3225 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3226 int rc; 3227 3228 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3229 ut_init_trid(&trid); 3230 3231 set_thread(0); 3232 3233 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3234 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3235 3236 g_ut_attach_ctrlr_status = 0; 3237 g_ut_attach_bdev_count = 1; 3238 3239 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3240 attach_ctrlr_done, NULL, NULL, NULL, false); 3241 CU_ASSERT(rc == 0); 3242 3243 spdk_delay_us(1000); 3244 poll_threads(); 3245 3246 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3247 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3248 3249 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3250 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3251 3252 ch1 = spdk_get_io_channel(bdev); 3253 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3254 3255 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3256 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3257 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3258 nvme_qpair1 = io_path1->qpair; 3259 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3260 3261 set_thread(1); 3262 3263 ch2 = spdk_get_io_channel(bdev); 3264 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3265 3266 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3267 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3268 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3269 nvme_qpair2 = io_path2->qpair; 3270 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3271 3272 /* If a qpair is disconnected, it is freed and then reconnected via 3273 * resetting the corresponding nvme_ctrlr. 3274 */ 3275 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3276 ctrlr->is_failed = true; 3277 3278 poll_thread_times(1, 3); 3279 CU_ASSERT(nvme_qpair1->qpair != NULL); 3280 CU_ASSERT(nvme_qpair2->qpair == NULL); 3281 CU_ASSERT(nvme_ctrlr->resetting == true); 3282 3283 poll_thread_times(0, 3); 3284 CU_ASSERT(nvme_qpair1->qpair == NULL); 3285 CU_ASSERT(nvme_qpair2->qpair == NULL); 3286 CU_ASSERT(ctrlr->is_failed == true); 3287 3288 poll_thread_times(1, 2); 3289 poll_thread_times(0, 1); 3290 CU_ASSERT(ctrlr->is_failed == false); 3291 CU_ASSERT(ctrlr->adminq.is_connected == false); 3292 3293 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3294 poll_thread_times(0, 2); 3295 CU_ASSERT(ctrlr->adminq.is_connected == true); 3296 3297 poll_thread_times(0, 1); 3298 poll_thread_times(1, 1); 3299 CU_ASSERT(nvme_qpair1->qpair != NULL); 3300 CU_ASSERT(nvme_qpair2->qpair != NULL); 3301 CU_ASSERT(nvme_ctrlr->resetting == true); 3302 3303 poll_thread_times(0, 2); 3304 poll_thread_times(1, 1); 3305 poll_thread_times(0, 1); 3306 CU_ASSERT(nvme_ctrlr->resetting == false); 3307 3308 poll_threads(); 3309 3310 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3311 * fails, the qpair is just freed. 3312 */ 3313 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3314 ctrlr->is_failed = true; 3315 ctrlr->fail_reset = true; 3316 3317 poll_thread_times(1, 3); 3318 CU_ASSERT(nvme_qpair1->qpair != NULL); 3319 CU_ASSERT(nvme_qpair2->qpair == NULL); 3320 CU_ASSERT(nvme_ctrlr->resetting == true); 3321 3322 poll_thread_times(0, 3); 3323 poll_thread_times(1, 1); 3324 CU_ASSERT(nvme_qpair1->qpair == NULL); 3325 CU_ASSERT(nvme_qpair2->qpair == NULL); 3326 CU_ASSERT(ctrlr->is_failed == true); 3327 3328 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3329 poll_thread_times(0, 3); 3330 poll_thread_times(1, 1); 3331 poll_thread_times(0, 1); 3332 CU_ASSERT(ctrlr->is_failed == true); 3333 CU_ASSERT(nvme_ctrlr->resetting == false); 3334 CU_ASSERT(nvme_qpair1->qpair == NULL); 3335 CU_ASSERT(nvme_qpair2->qpair == NULL); 3336 3337 poll_threads(); 3338 3339 spdk_put_io_channel(ch2); 3340 3341 set_thread(0); 3342 3343 spdk_put_io_channel(ch1); 3344 3345 poll_threads(); 3346 3347 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3348 CU_ASSERT(rc == 0); 3349 3350 poll_threads(); 3351 spdk_delay_us(1000); 3352 poll_threads(); 3353 3354 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3355 } 3356 3357 static void 3358 test_create_bdev_ctrlr(void) 3359 { 3360 struct nvme_path_id path1 = {}, path2 = {}; 3361 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3362 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3363 const int STRING_SIZE = 32; 3364 const char *attached_names[STRING_SIZE]; 3365 int rc; 3366 3367 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3368 ut_init_trid(&path1.trid); 3369 ut_init_trid2(&path2.trid); 3370 3371 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3372 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3373 3374 g_ut_attach_ctrlr_status = 0; 3375 g_ut_attach_bdev_count = 0; 3376 3377 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3378 attach_ctrlr_done, NULL, NULL, NULL, true); 3379 CU_ASSERT(rc == 0); 3380 3381 spdk_delay_us(1000); 3382 poll_threads(); 3383 3384 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3385 poll_threads(); 3386 3387 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3388 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3389 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3390 3391 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3392 g_ut_attach_ctrlr_status = -EINVAL; 3393 3394 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3395 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3396 3397 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3398 3399 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3400 attach_ctrlr_done, NULL, NULL, NULL, true); 3401 CU_ASSERT(rc == 0); 3402 3403 spdk_delay_us(1000); 3404 poll_threads(); 3405 3406 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3407 poll_threads(); 3408 3409 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3410 3411 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3412 g_ut_attach_ctrlr_status = 0; 3413 3414 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3415 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3416 3417 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3418 attach_ctrlr_done, NULL, NULL, NULL, true); 3419 CU_ASSERT(rc == 0); 3420 3421 spdk_delay_us(1000); 3422 poll_threads(); 3423 3424 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3425 poll_threads(); 3426 3427 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3428 3429 /* Delete two ctrlrs at once. */ 3430 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3431 CU_ASSERT(rc == 0); 3432 3433 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3434 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3435 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3436 3437 poll_threads(); 3438 spdk_delay_us(1000); 3439 poll_threads(); 3440 3441 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3442 3443 /* Add two ctrlrs and delete one by one. */ 3444 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3445 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3446 3447 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3448 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3449 3450 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3451 attach_ctrlr_done, NULL, NULL, NULL, true); 3452 CU_ASSERT(rc == 0); 3453 3454 spdk_delay_us(1000); 3455 poll_threads(); 3456 3457 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3458 poll_threads(); 3459 3460 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3461 attach_ctrlr_done, NULL, NULL, NULL, true); 3462 CU_ASSERT(rc == 0); 3463 3464 spdk_delay_us(1000); 3465 poll_threads(); 3466 3467 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3468 poll_threads(); 3469 3470 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3471 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3472 3473 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3474 CU_ASSERT(rc == 0); 3475 3476 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3477 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3478 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3479 3480 poll_threads(); 3481 spdk_delay_us(1000); 3482 poll_threads(); 3483 3484 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3485 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3486 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3487 3488 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3489 CU_ASSERT(rc == 0); 3490 3491 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3492 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3493 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3494 3495 poll_threads(); 3496 spdk_delay_us(1000); 3497 poll_threads(); 3498 3499 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3500 } 3501 3502 static struct nvme_ns * 3503 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3504 { 3505 struct nvme_ns *nvme_ns; 3506 3507 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3508 if (nvme_ns->ctrlr == nvme_ctrlr) { 3509 return nvme_ns; 3510 } 3511 } 3512 3513 return NULL; 3514 } 3515 3516 static void 3517 test_add_multi_ns_to_bdev(void) 3518 { 3519 struct nvme_path_id path1 = {}, path2 = {}; 3520 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3521 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3522 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3523 struct nvme_ns *nvme_ns1, *nvme_ns2; 3524 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3525 const int STRING_SIZE = 32; 3526 const char *attached_names[STRING_SIZE]; 3527 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3528 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3529 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3530 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3531 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3532 int rc; 3533 3534 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3535 ut_init_trid(&path1.trid); 3536 ut_init_trid2(&path2.trid); 3537 3538 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3539 3540 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3541 * namespaces are populated. 3542 */ 3543 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3544 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3545 3546 ctrlr1->ns[1].is_active = false; 3547 ctrlr1->ns[4].is_active = false; 3548 ctrlr1->ns[0].uuid = &uuid1; 3549 ctrlr1->ns[2].uuid = &uuid3; 3550 ctrlr1->ns[3].uuid = &uuid4; 3551 3552 g_ut_attach_ctrlr_status = 0; 3553 g_ut_attach_bdev_count = 3; 3554 3555 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3556 attach_ctrlr_done, NULL, NULL, NULL, true); 3557 CU_ASSERT(rc == 0); 3558 3559 spdk_delay_us(1000); 3560 poll_threads(); 3561 3562 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3563 poll_threads(); 3564 3565 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3566 * namespaces are populated. The uuid of 4th namespace is different, and hence 3567 * adding 4th namespace to a bdev should fail. 3568 */ 3569 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3570 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3571 3572 ctrlr2->ns[2].is_active = false; 3573 ctrlr2->ns[4].is_active = false; 3574 ctrlr2->ns[0].uuid = &uuid1; 3575 ctrlr2->ns[1].uuid = &uuid2; 3576 ctrlr2->ns[3].uuid = &uuid44; 3577 3578 g_ut_attach_ctrlr_status = 0; 3579 g_ut_attach_bdev_count = 2; 3580 3581 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3582 attach_ctrlr_done, NULL, NULL, NULL, true); 3583 CU_ASSERT(rc == 0); 3584 3585 spdk_delay_us(1000); 3586 poll_threads(); 3587 3588 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3589 poll_threads(); 3590 3591 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3592 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3593 3594 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3595 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3596 3597 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3598 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3599 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3600 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3601 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3602 3603 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3604 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3605 3606 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3607 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3608 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3609 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3610 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3611 3612 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3613 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3614 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3615 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3616 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3617 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3618 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3619 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3620 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3621 3622 CU_ASSERT(bdev1->ref == 2); 3623 CU_ASSERT(bdev2->ref == 1); 3624 CU_ASSERT(bdev3->ref == 1); 3625 CU_ASSERT(bdev4->ref == 1); 3626 3627 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3628 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3629 CU_ASSERT(rc == 0); 3630 3631 poll_threads(); 3632 spdk_delay_us(1000); 3633 poll_threads(); 3634 3635 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3636 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3637 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3638 3639 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3640 CU_ASSERT(rc == 0); 3641 3642 poll_threads(); 3643 spdk_delay_us(1000); 3644 poll_threads(); 3645 3646 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3647 3648 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3649 * can be deleted when the bdev subsystem shutdown. 3650 */ 3651 g_ut_attach_bdev_count = 1; 3652 3653 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3654 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3655 3656 ctrlr1->ns[0].uuid = &uuid1; 3657 3658 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3659 attach_ctrlr_done, NULL, NULL, NULL, true); 3660 CU_ASSERT(rc == 0); 3661 3662 spdk_delay_us(1000); 3663 poll_threads(); 3664 3665 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3666 poll_threads(); 3667 3668 ut_init_trid2(&path2.trid); 3669 3670 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3671 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3672 3673 ctrlr2->ns[0].uuid = &uuid1; 3674 3675 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3676 attach_ctrlr_done, NULL, NULL, NULL, true); 3677 CU_ASSERT(rc == 0); 3678 3679 spdk_delay_us(1000); 3680 poll_threads(); 3681 3682 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3683 poll_threads(); 3684 3685 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3686 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3687 3688 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3689 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3690 3691 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3692 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3693 3694 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3695 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3696 3697 /* Check if a nvme_bdev has two nvme_ns. */ 3698 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3699 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3700 CU_ASSERT(nvme_ns1->bdev == bdev1); 3701 3702 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3703 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3704 CU_ASSERT(nvme_ns2->bdev == bdev1); 3705 3706 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3707 bdev_nvme_destruct(&bdev1->disk); 3708 3709 poll_threads(); 3710 3711 CU_ASSERT(nvme_ns1->bdev == NULL); 3712 CU_ASSERT(nvme_ns2->bdev == NULL); 3713 3714 nvme_ctrlr1->destruct = true; 3715 _nvme_ctrlr_destruct(nvme_ctrlr1); 3716 3717 poll_threads(); 3718 spdk_delay_us(1000); 3719 poll_threads(); 3720 3721 nvme_ctrlr2->destruct = true; 3722 _nvme_ctrlr_destruct(nvme_ctrlr2); 3723 3724 poll_threads(); 3725 spdk_delay_us(1000); 3726 poll_threads(); 3727 3728 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3729 } 3730 3731 static void 3732 test_add_multi_io_paths_to_nbdev_ch(void) 3733 { 3734 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3735 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3736 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3737 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3738 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3739 const int STRING_SIZE = 32; 3740 const char *attached_names[STRING_SIZE]; 3741 struct nvme_bdev *bdev; 3742 struct spdk_io_channel *ch; 3743 struct nvme_bdev_channel *nbdev_ch; 3744 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3745 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3746 int rc; 3747 3748 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3749 ut_init_trid(&path1.trid); 3750 ut_init_trid2(&path2.trid); 3751 ut_init_trid3(&path3.trid); 3752 g_ut_attach_ctrlr_status = 0; 3753 g_ut_attach_bdev_count = 1; 3754 3755 set_thread(1); 3756 3757 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3758 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3759 3760 ctrlr1->ns[0].uuid = &uuid1; 3761 3762 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3763 attach_ctrlr_done, NULL, NULL, NULL, true); 3764 CU_ASSERT(rc == 0); 3765 3766 spdk_delay_us(1000); 3767 poll_threads(); 3768 3769 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3770 poll_threads(); 3771 3772 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3773 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3774 3775 ctrlr2->ns[0].uuid = &uuid1; 3776 3777 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3778 attach_ctrlr_done, NULL, NULL, NULL, true); 3779 CU_ASSERT(rc == 0); 3780 3781 spdk_delay_us(1000); 3782 poll_threads(); 3783 3784 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3785 poll_threads(); 3786 3787 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3788 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3789 3790 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3791 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3792 3793 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3794 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3795 3796 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3797 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3798 3799 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3800 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3801 3802 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3803 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3804 3805 set_thread(0); 3806 3807 ch = spdk_get_io_channel(bdev); 3808 SPDK_CU_ASSERT_FATAL(ch != NULL); 3809 nbdev_ch = spdk_io_channel_get_ctx(ch); 3810 3811 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3812 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3813 3814 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3815 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3816 3817 set_thread(1); 3818 3819 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3820 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3821 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3822 3823 ctrlr3->ns[0].uuid = &uuid1; 3824 3825 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3826 attach_ctrlr_done, NULL, NULL, NULL, true); 3827 CU_ASSERT(rc == 0); 3828 3829 spdk_delay_us(1000); 3830 poll_threads(); 3831 3832 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3833 poll_threads(); 3834 3835 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3836 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3837 3838 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3839 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3840 3841 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3842 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3843 3844 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3845 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3846 CU_ASSERT(rc == 0); 3847 3848 poll_threads(); 3849 spdk_delay_us(1000); 3850 poll_threads(); 3851 3852 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3853 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3854 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3855 3856 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3857 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3858 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3859 3860 set_thread(0); 3861 3862 spdk_put_io_channel(ch); 3863 3864 poll_threads(); 3865 3866 set_thread(1); 3867 3868 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3869 CU_ASSERT(rc == 0); 3870 3871 poll_threads(); 3872 spdk_delay_us(1000); 3873 poll_threads(); 3874 3875 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3876 } 3877 3878 static void 3879 test_admin_path(void) 3880 { 3881 struct nvme_path_id path1 = {}, path2 = {}; 3882 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3883 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3884 const int STRING_SIZE = 32; 3885 const char *attached_names[STRING_SIZE]; 3886 struct nvme_bdev *bdev; 3887 struct spdk_io_channel *ch; 3888 struct spdk_bdev_io *bdev_io; 3889 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3890 int rc; 3891 3892 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3893 ut_init_trid(&path1.trid); 3894 ut_init_trid2(&path2.trid); 3895 g_ut_attach_ctrlr_status = 0; 3896 g_ut_attach_bdev_count = 1; 3897 3898 set_thread(0); 3899 3900 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3901 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3902 3903 ctrlr1->ns[0].uuid = &uuid1; 3904 3905 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3906 attach_ctrlr_done, NULL, NULL, NULL, true); 3907 CU_ASSERT(rc == 0); 3908 3909 spdk_delay_us(1000); 3910 poll_threads(); 3911 3912 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3913 poll_threads(); 3914 3915 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3916 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3917 3918 ctrlr2->ns[0].uuid = &uuid1; 3919 3920 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3921 attach_ctrlr_done, NULL, NULL, NULL, true); 3922 CU_ASSERT(rc == 0); 3923 3924 spdk_delay_us(1000); 3925 poll_threads(); 3926 3927 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3928 poll_threads(); 3929 3930 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3931 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3932 3933 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3934 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3935 3936 ch = spdk_get_io_channel(bdev); 3937 SPDK_CU_ASSERT_FATAL(ch != NULL); 3938 3939 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3940 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3941 3942 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3943 * submitted to ctrlr2. 3944 */ 3945 ctrlr1->is_failed = true; 3946 bdev_io->internal.in_submit_request = true; 3947 3948 bdev_nvme_submit_request(ch, bdev_io); 3949 3950 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3951 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3952 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3953 3954 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3955 poll_threads(); 3956 3957 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3958 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3959 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3960 3961 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3962 ctrlr2->is_failed = true; 3963 bdev_io->internal.in_submit_request = true; 3964 3965 bdev_nvme_submit_request(ch, bdev_io); 3966 3967 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3968 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3969 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3970 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3971 3972 free(bdev_io); 3973 3974 spdk_put_io_channel(ch); 3975 3976 poll_threads(); 3977 3978 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3979 CU_ASSERT(rc == 0); 3980 3981 poll_threads(); 3982 spdk_delay_us(1000); 3983 poll_threads(); 3984 3985 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3986 } 3987 3988 static struct nvme_io_path * 3989 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3990 struct nvme_ctrlr *nvme_ctrlr) 3991 { 3992 struct nvme_io_path *io_path; 3993 3994 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3995 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3996 return io_path; 3997 } 3998 } 3999 4000 return NULL; 4001 } 4002 4003 static void 4004 test_reset_bdev_ctrlr(void) 4005 { 4006 struct nvme_path_id path1 = {}, path2 = {}; 4007 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4008 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4009 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4010 struct nvme_path_id *curr_path1, *curr_path2; 4011 const int STRING_SIZE = 32; 4012 const char *attached_names[STRING_SIZE]; 4013 struct nvme_bdev *bdev; 4014 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 4015 struct nvme_bdev_io *first_bio; 4016 struct spdk_io_channel *ch1, *ch2; 4017 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 4018 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 4019 int rc; 4020 4021 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4022 ut_init_trid(&path1.trid); 4023 ut_init_trid2(&path2.trid); 4024 g_ut_attach_ctrlr_status = 0; 4025 g_ut_attach_bdev_count = 1; 4026 4027 set_thread(0); 4028 4029 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4030 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4031 4032 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4033 attach_ctrlr_done, NULL, NULL, NULL, true); 4034 CU_ASSERT(rc == 0); 4035 4036 spdk_delay_us(1000); 4037 poll_threads(); 4038 4039 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4040 poll_threads(); 4041 4042 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4043 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4044 4045 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4046 attach_ctrlr_done, NULL, NULL, NULL, true); 4047 CU_ASSERT(rc == 0); 4048 4049 spdk_delay_us(1000); 4050 poll_threads(); 4051 4052 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4053 poll_threads(); 4054 4055 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4056 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4057 4058 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4059 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4060 4061 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4062 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4063 4064 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4065 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4066 4067 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4068 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4069 4070 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4071 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4072 4073 set_thread(0); 4074 4075 ch1 = spdk_get_io_channel(bdev); 4076 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4077 4078 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4079 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4080 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4081 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4082 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4083 4084 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4085 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4086 4087 set_thread(1); 4088 4089 ch2 = spdk_get_io_channel(bdev); 4090 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4091 4092 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4093 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4094 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4095 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4096 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4097 4098 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4099 4100 /* The first reset request from bdev_io is submitted on thread 0. 4101 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4102 * 4103 * A few extra polls are necessary after resetting ctrlr1 to check 4104 * pending reset requests for ctrlr1. 4105 */ 4106 ctrlr1->is_failed = true; 4107 curr_path1->last_failed_tsc = spdk_get_ticks(); 4108 ctrlr2->is_failed = true; 4109 curr_path2->last_failed_tsc = spdk_get_ticks(); 4110 4111 set_thread(0); 4112 4113 bdev_nvme_submit_request(ch1, first_bdev_io); 4114 CU_ASSERT(first_bio->io_path == io_path11); 4115 CU_ASSERT(nvme_ctrlr1->resetting == true); 4116 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4117 4118 poll_thread_times(0, 3); 4119 CU_ASSERT(io_path11->qpair->qpair == NULL); 4120 CU_ASSERT(io_path21->qpair->qpair != NULL); 4121 4122 poll_thread_times(1, 2); 4123 CU_ASSERT(io_path11->qpair->qpair == NULL); 4124 CU_ASSERT(io_path21->qpair->qpair == NULL); 4125 CU_ASSERT(ctrlr1->is_failed == true); 4126 4127 poll_thread_times(0, 1); 4128 CU_ASSERT(nvme_ctrlr1->resetting == true); 4129 CU_ASSERT(ctrlr1->is_failed == false); 4130 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4131 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4132 4133 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4134 poll_thread_times(0, 2); 4135 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4136 4137 poll_thread_times(0, 1); 4138 CU_ASSERT(io_path11->qpair->qpair != NULL); 4139 CU_ASSERT(io_path21->qpair->qpair == NULL); 4140 4141 poll_thread_times(1, 1); 4142 CU_ASSERT(io_path11->qpair->qpair != NULL); 4143 CU_ASSERT(io_path21->qpair->qpair != NULL); 4144 4145 poll_thread_times(0, 2); 4146 CU_ASSERT(nvme_ctrlr1->resetting == true); 4147 poll_thread_times(1, 1); 4148 CU_ASSERT(nvme_ctrlr1->resetting == true); 4149 poll_thread_times(0, 2); 4150 CU_ASSERT(nvme_ctrlr1->resetting == false); 4151 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4152 CU_ASSERT(first_bio->io_path == io_path12); 4153 CU_ASSERT(nvme_ctrlr2->resetting == true); 4154 4155 poll_thread_times(0, 3); 4156 CU_ASSERT(io_path12->qpair->qpair == NULL); 4157 CU_ASSERT(io_path22->qpair->qpair != NULL); 4158 4159 poll_thread_times(1, 2); 4160 CU_ASSERT(io_path12->qpair->qpair == NULL); 4161 CU_ASSERT(io_path22->qpair->qpair == NULL); 4162 CU_ASSERT(ctrlr2->is_failed == true); 4163 4164 poll_thread_times(0, 1); 4165 CU_ASSERT(nvme_ctrlr2->resetting == true); 4166 CU_ASSERT(ctrlr2->is_failed == false); 4167 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4168 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4169 4170 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4171 poll_thread_times(0, 2); 4172 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4173 4174 poll_thread_times(0, 1); 4175 CU_ASSERT(io_path12->qpair->qpair != NULL); 4176 CU_ASSERT(io_path22->qpair->qpair == NULL); 4177 4178 poll_thread_times(1, 2); 4179 CU_ASSERT(io_path12->qpair->qpair != NULL); 4180 CU_ASSERT(io_path22->qpair->qpair != NULL); 4181 4182 poll_thread_times(0, 2); 4183 CU_ASSERT(nvme_ctrlr2->resetting == true); 4184 poll_thread_times(1, 1); 4185 CU_ASSERT(nvme_ctrlr2->resetting == true); 4186 poll_thread_times(0, 2); 4187 CU_ASSERT(first_bio->io_path == NULL); 4188 CU_ASSERT(nvme_ctrlr2->resetting == false); 4189 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4190 4191 poll_threads(); 4192 4193 /* There is a race between two reset requests from bdev_io. 4194 * 4195 * The first reset request is submitted on thread 0, and the second reset 4196 * request is submitted on thread 1 while the first is resetting ctrlr1. 4197 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4198 * both reset requests go to ctrlr2. The first comes earlier than the second. 4199 * The second is pending on ctrlr2 again. After the first completes resetting 4200 * ctrl2, both complete successfully. 4201 */ 4202 ctrlr1->is_failed = true; 4203 curr_path1->last_failed_tsc = spdk_get_ticks(); 4204 ctrlr2->is_failed = true; 4205 curr_path2->last_failed_tsc = spdk_get_ticks(); 4206 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4207 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4208 4209 set_thread(0); 4210 4211 bdev_nvme_submit_request(ch1, first_bdev_io); 4212 4213 set_thread(1); 4214 4215 bdev_nvme_submit_request(ch2, second_bdev_io); 4216 4217 CU_ASSERT(nvme_ctrlr1->resetting == true); 4218 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4219 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == 4220 (struct nvme_bdev_io *)second_bdev_io->driver_ctx); 4221 4222 poll_threads(); 4223 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4224 poll_threads(); 4225 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4226 poll_threads(); 4227 4228 CU_ASSERT(ctrlr1->is_failed == false); 4229 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4230 CU_ASSERT(ctrlr2->is_failed == false); 4231 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4232 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4233 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4234 4235 set_thread(0); 4236 4237 spdk_put_io_channel(ch1); 4238 4239 set_thread(1); 4240 4241 spdk_put_io_channel(ch2); 4242 4243 poll_threads(); 4244 4245 set_thread(0); 4246 4247 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4248 CU_ASSERT(rc == 0); 4249 4250 poll_threads(); 4251 spdk_delay_us(1000); 4252 poll_threads(); 4253 4254 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4255 4256 free(first_bdev_io); 4257 free(second_bdev_io); 4258 } 4259 4260 static void 4261 test_find_io_path(void) 4262 { 4263 struct nvme_bdev_channel nbdev_ch = { 4264 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4265 }; 4266 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4267 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4268 struct spdk_nvme_ns ns1 = {}, ns2 = {}; 4269 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4270 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4271 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4272 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4273 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }; 4274 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4275 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4276 4277 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4278 4279 /* Test if io_path whose ANA state is not accessible is excluded. */ 4280 4281 nvme_qpair1.qpair = &qpair1; 4282 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4283 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4284 4285 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4286 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4287 4288 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4289 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4290 4291 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4292 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4293 4294 nbdev_ch.current_io_path = NULL; 4295 4296 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4297 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4298 4299 nbdev_ch.current_io_path = NULL; 4300 4301 /* Test if io_path whose qpair is resetting is excluded. */ 4302 4303 nvme_qpair1.qpair = NULL; 4304 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4305 4306 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4307 4308 /* Test if ANA optimized state or the first found ANA non-optimized state 4309 * is prioritized. 4310 */ 4311 4312 nvme_qpair1.qpair = &qpair1; 4313 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4314 nvme_qpair2.qpair = &qpair2; 4315 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4316 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4317 4318 nbdev_ch.current_io_path = NULL; 4319 4320 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4321 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4322 4323 nbdev_ch.current_io_path = NULL; 4324 } 4325 4326 static void 4327 test_retry_io_if_ana_state_is_updating(void) 4328 { 4329 struct nvme_path_id path = {}; 4330 struct nvme_ctrlr_opts opts = {}; 4331 struct spdk_nvme_ctrlr *ctrlr; 4332 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4333 struct nvme_ctrlr *nvme_ctrlr; 4334 const int STRING_SIZE = 32; 4335 const char *attached_names[STRING_SIZE]; 4336 struct nvme_bdev *bdev; 4337 struct nvme_ns *nvme_ns; 4338 struct spdk_bdev_io *bdev_io1; 4339 struct spdk_io_channel *ch; 4340 struct nvme_bdev_channel *nbdev_ch; 4341 struct nvme_io_path *io_path; 4342 struct nvme_qpair *nvme_qpair; 4343 int rc; 4344 4345 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4346 ut_init_trid(&path.trid); 4347 4348 set_thread(0); 4349 4350 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4351 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4352 4353 g_ut_attach_ctrlr_status = 0; 4354 g_ut_attach_bdev_count = 1; 4355 4356 opts.ctrlr_loss_timeout_sec = -1; 4357 opts.reconnect_delay_sec = 1; 4358 4359 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4360 attach_ctrlr_done, NULL, NULL, &opts, false); 4361 CU_ASSERT(rc == 0); 4362 4363 spdk_delay_us(1000); 4364 poll_threads(); 4365 4366 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4367 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4368 4369 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4370 CU_ASSERT(nvme_ctrlr != NULL); 4371 4372 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4373 CU_ASSERT(bdev != NULL); 4374 4375 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4376 CU_ASSERT(nvme_ns != NULL); 4377 4378 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4379 ut_bdev_io_set_buf(bdev_io1); 4380 4381 ch = spdk_get_io_channel(bdev); 4382 SPDK_CU_ASSERT_FATAL(ch != NULL); 4383 4384 nbdev_ch = spdk_io_channel_get_ctx(ch); 4385 4386 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4387 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4388 4389 nvme_qpair = io_path->qpair; 4390 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4391 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4392 4393 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4394 4395 /* If qpair is connected, I/O should succeed. */ 4396 bdev_io1->internal.in_submit_request = true; 4397 4398 bdev_nvme_submit_request(ch, bdev_io1); 4399 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4400 4401 poll_threads(); 4402 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4403 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4404 4405 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4406 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4407 nbdev_ch->current_io_path = NULL; 4408 4409 bdev_io1->internal.in_submit_request = true; 4410 4411 bdev_nvme_submit_request(ch, bdev_io1); 4412 4413 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4414 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4415 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4416 4417 /* ANA state became accessible while I/O was queued. */ 4418 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4419 4420 spdk_delay_us(1000000); 4421 4422 poll_thread_times(0, 1); 4423 4424 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4425 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4426 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4427 4428 poll_threads(); 4429 4430 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4431 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4432 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4433 4434 free(bdev_io1); 4435 4436 spdk_put_io_channel(ch); 4437 4438 poll_threads(); 4439 4440 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4441 CU_ASSERT(rc == 0); 4442 4443 poll_threads(); 4444 spdk_delay_us(1000); 4445 poll_threads(); 4446 4447 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4448 } 4449 4450 static void 4451 test_retry_io_for_io_path_error(void) 4452 { 4453 struct nvme_path_id path1 = {}, path2 = {}; 4454 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4455 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4456 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4457 const int STRING_SIZE = 32; 4458 const char *attached_names[STRING_SIZE]; 4459 struct nvme_bdev *bdev; 4460 struct nvme_ns *nvme_ns1, *nvme_ns2; 4461 struct spdk_bdev_io *bdev_io; 4462 struct nvme_bdev_io *bio; 4463 struct spdk_io_channel *ch; 4464 struct nvme_bdev_channel *nbdev_ch; 4465 struct nvme_io_path *io_path1, *io_path2; 4466 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4467 struct ut_nvme_req *req; 4468 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4469 int rc; 4470 4471 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4472 ut_init_trid(&path1.trid); 4473 ut_init_trid2(&path2.trid); 4474 4475 g_opts.bdev_retry_count = 1; 4476 4477 set_thread(0); 4478 4479 g_ut_attach_ctrlr_status = 0; 4480 g_ut_attach_bdev_count = 1; 4481 4482 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4483 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4484 4485 ctrlr1->ns[0].uuid = &uuid1; 4486 4487 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4488 attach_ctrlr_done, NULL, NULL, NULL, true); 4489 CU_ASSERT(rc == 0); 4490 4491 spdk_delay_us(1000); 4492 poll_threads(); 4493 4494 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4495 poll_threads(); 4496 4497 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4498 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4499 4500 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4501 CU_ASSERT(nvme_ctrlr1 != NULL); 4502 4503 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4504 CU_ASSERT(bdev != NULL); 4505 4506 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4507 CU_ASSERT(nvme_ns1 != NULL); 4508 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4509 4510 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4511 ut_bdev_io_set_buf(bdev_io); 4512 4513 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4514 4515 ch = spdk_get_io_channel(bdev); 4516 SPDK_CU_ASSERT_FATAL(ch != NULL); 4517 4518 nbdev_ch = spdk_io_channel_get_ctx(ch); 4519 4520 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4521 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4522 4523 nvme_qpair1 = io_path1->qpair; 4524 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4525 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4526 4527 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4528 4529 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4530 bdev_io->internal.in_submit_request = true; 4531 4532 bdev_nvme_submit_request(ch, bdev_io); 4533 4534 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4535 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4536 4537 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4538 SPDK_CU_ASSERT_FATAL(req != NULL); 4539 4540 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4541 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4542 req->cpl.status.dnr = 1; 4543 4544 poll_thread_times(0, 1); 4545 4546 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4547 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4548 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4549 4550 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4551 bdev_io->internal.in_submit_request = true; 4552 4553 bdev_nvme_submit_request(ch, bdev_io); 4554 4555 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4556 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4557 4558 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4559 SPDK_CU_ASSERT_FATAL(req != NULL); 4560 4561 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4562 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4563 4564 poll_thread_times(0, 1); 4565 4566 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4567 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4568 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4569 4570 poll_threads(); 4571 4572 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4573 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4574 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4575 4576 /* Add io_path2 dynamically, and create a multipath configuration. */ 4577 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4578 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4579 4580 ctrlr2->ns[0].uuid = &uuid1; 4581 4582 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4583 attach_ctrlr_done, NULL, NULL, NULL, true); 4584 CU_ASSERT(rc == 0); 4585 4586 spdk_delay_us(1000); 4587 poll_threads(); 4588 4589 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4590 poll_threads(); 4591 4592 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4593 CU_ASSERT(nvme_ctrlr2 != NULL); 4594 4595 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4596 CU_ASSERT(nvme_ns2 != NULL); 4597 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4598 4599 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4600 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4601 4602 nvme_qpair2 = io_path2->qpair; 4603 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4604 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4605 4606 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4607 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4608 * So after a retry, I/O is submitted to io_path2 and should succeed. 4609 */ 4610 bdev_io->internal.in_submit_request = true; 4611 4612 bdev_nvme_submit_request(ch, bdev_io); 4613 4614 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4615 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4616 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4617 4618 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4619 SPDK_CU_ASSERT_FATAL(req != NULL); 4620 4621 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4622 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4623 4624 poll_thread_times(0, 1); 4625 4626 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4627 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4628 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4629 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4630 4631 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4632 nvme_qpair1->qpair = NULL; 4633 4634 poll_threads(); 4635 4636 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4637 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4638 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4639 4640 free(bdev_io); 4641 4642 spdk_put_io_channel(ch); 4643 4644 poll_threads(); 4645 4646 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4647 CU_ASSERT(rc == 0); 4648 4649 poll_threads(); 4650 spdk_delay_us(1000); 4651 poll_threads(); 4652 4653 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4654 4655 g_opts.bdev_retry_count = 0; 4656 } 4657 4658 static void 4659 test_retry_io_count(void) 4660 { 4661 struct nvme_path_id path = {}; 4662 struct spdk_nvme_ctrlr *ctrlr; 4663 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4664 struct nvme_ctrlr *nvme_ctrlr; 4665 const int STRING_SIZE = 32; 4666 const char *attached_names[STRING_SIZE]; 4667 struct nvme_bdev *bdev; 4668 struct nvme_ns *nvme_ns; 4669 struct spdk_bdev_io *bdev_io; 4670 struct nvme_bdev_io *bio; 4671 struct spdk_io_channel *ch; 4672 struct nvme_bdev_channel *nbdev_ch; 4673 struct nvme_io_path *io_path; 4674 struct nvme_qpair *nvme_qpair; 4675 struct ut_nvme_req *req; 4676 int rc; 4677 4678 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4679 ut_init_trid(&path.trid); 4680 4681 set_thread(0); 4682 4683 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4684 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4685 4686 g_ut_attach_ctrlr_status = 0; 4687 g_ut_attach_bdev_count = 1; 4688 4689 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4690 attach_ctrlr_done, NULL, NULL, NULL, false); 4691 CU_ASSERT(rc == 0); 4692 4693 spdk_delay_us(1000); 4694 poll_threads(); 4695 4696 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4697 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4698 4699 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4700 CU_ASSERT(nvme_ctrlr != NULL); 4701 4702 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4703 CU_ASSERT(bdev != NULL); 4704 4705 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4706 CU_ASSERT(nvme_ns != NULL); 4707 4708 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4709 ut_bdev_io_set_buf(bdev_io); 4710 4711 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4712 4713 ch = spdk_get_io_channel(bdev); 4714 SPDK_CU_ASSERT_FATAL(ch != NULL); 4715 4716 nbdev_ch = spdk_io_channel_get_ctx(ch); 4717 4718 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4719 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4720 4721 nvme_qpair = io_path->qpair; 4722 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4723 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4724 4725 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4726 4727 /* If I/O is aborted by request, it should not be retried. */ 4728 g_opts.bdev_retry_count = 1; 4729 4730 bdev_io->internal.in_submit_request = true; 4731 4732 bdev_nvme_submit_request(ch, bdev_io); 4733 4734 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4735 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4736 4737 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4738 SPDK_CU_ASSERT_FATAL(req != NULL); 4739 4740 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4741 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4742 4743 poll_thread_times(0, 1); 4744 4745 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4746 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4747 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4748 4749 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4750 * the failed I/O should not be retried. 4751 */ 4752 g_opts.bdev_retry_count = 4; 4753 4754 bdev_io->internal.in_submit_request = true; 4755 4756 bdev_nvme_submit_request(ch, bdev_io); 4757 4758 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4759 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4760 4761 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4762 SPDK_CU_ASSERT_FATAL(req != NULL); 4763 4764 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4765 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4766 bio->retry_count = 4; 4767 4768 poll_thread_times(0, 1); 4769 4770 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4771 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4772 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4773 4774 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4775 g_opts.bdev_retry_count = -1; 4776 4777 bdev_io->internal.in_submit_request = true; 4778 4779 bdev_nvme_submit_request(ch, bdev_io); 4780 4781 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4782 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4783 4784 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4785 SPDK_CU_ASSERT_FATAL(req != NULL); 4786 4787 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4788 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4789 bio->retry_count = 4; 4790 4791 poll_thread_times(0, 1); 4792 4793 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4794 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4795 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4796 4797 poll_threads(); 4798 4799 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4800 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4801 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4802 4803 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4804 * the failed I/O should be retried. 4805 */ 4806 g_opts.bdev_retry_count = 4; 4807 4808 bdev_io->internal.in_submit_request = true; 4809 4810 bdev_nvme_submit_request(ch, bdev_io); 4811 4812 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4813 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4814 4815 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4816 SPDK_CU_ASSERT_FATAL(req != NULL); 4817 4818 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4819 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4820 bio->retry_count = 3; 4821 4822 poll_thread_times(0, 1); 4823 4824 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4825 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4826 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4827 4828 poll_threads(); 4829 4830 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4831 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4832 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4833 4834 free(bdev_io); 4835 4836 spdk_put_io_channel(ch); 4837 4838 poll_threads(); 4839 4840 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4841 CU_ASSERT(rc == 0); 4842 4843 poll_threads(); 4844 spdk_delay_us(1000); 4845 poll_threads(); 4846 4847 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4848 4849 g_opts.bdev_retry_count = 0; 4850 } 4851 4852 static void 4853 test_concurrent_read_ana_log_page(void) 4854 { 4855 struct spdk_nvme_transport_id trid = {}; 4856 struct spdk_nvme_ctrlr *ctrlr; 4857 struct nvme_ctrlr *nvme_ctrlr; 4858 const int STRING_SIZE = 32; 4859 const char *attached_names[STRING_SIZE]; 4860 int rc; 4861 4862 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4863 ut_init_trid(&trid); 4864 4865 set_thread(0); 4866 4867 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4868 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4869 4870 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4871 4872 g_ut_attach_ctrlr_status = 0; 4873 g_ut_attach_bdev_count = 1; 4874 4875 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4876 attach_ctrlr_done, NULL, NULL, NULL, false); 4877 CU_ASSERT(rc == 0); 4878 4879 spdk_delay_us(1000); 4880 poll_threads(); 4881 4882 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4883 poll_threads(); 4884 4885 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4886 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4887 4888 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4889 4890 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4891 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4892 4893 /* Following read request should be rejected. */ 4894 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4895 4896 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4897 4898 set_thread(1); 4899 4900 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4901 4902 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4903 4904 /* Reset request while reading ANA log page should not be rejected. */ 4905 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4906 CU_ASSERT(rc == 0); 4907 4908 poll_threads(); 4909 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4910 poll_threads(); 4911 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4912 poll_threads(); 4913 4914 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4915 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4916 4917 /* Read ANA log page while resetting ctrlr should be rejected. */ 4918 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4919 CU_ASSERT(rc == 0); 4920 4921 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4922 4923 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4924 4925 poll_threads(); 4926 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4927 poll_threads(); 4928 4929 set_thread(0); 4930 4931 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4932 CU_ASSERT(rc == 0); 4933 4934 poll_threads(); 4935 spdk_delay_us(1000); 4936 poll_threads(); 4937 4938 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4939 } 4940 4941 static void 4942 test_retry_io_for_ana_error(void) 4943 { 4944 struct nvme_path_id path = {}; 4945 struct spdk_nvme_ctrlr *ctrlr; 4946 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4947 struct nvme_ctrlr *nvme_ctrlr; 4948 const int STRING_SIZE = 32; 4949 const char *attached_names[STRING_SIZE]; 4950 struct nvme_bdev *bdev; 4951 struct nvme_ns *nvme_ns; 4952 struct spdk_bdev_io *bdev_io; 4953 struct nvme_bdev_io *bio; 4954 struct spdk_io_channel *ch; 4955 struct nvme_bdev_channel *nbdev_ch; 4956 struct nvme_io_path *io_path; 4957 struct nvme_qpair *nvme_qpair; 4958 struct ut_nvme_req *req; 4959 uint64_t now; 4960 int rc; 4961 4962 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4963 ut_init_trid(&path.trid); 4964 4965 g_opts.bdev_retry_count = 1; 4966 4967 set_thread(0); 4968 4969 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4970 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4971 4972 g_ut_attach_ctrlr_status = 0; 4973 g_ut_attach_bdev_count = 1; 4974 4975 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4976 attach_ctrlr_done, NULL, NULL, NULL, false); 4977 CU_ASSERT(rc == 0); 4978 4979 spdk_delay_us(1000); 4980 poll_threads(); 4981 4982 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4983 poll_threads(); 4984 4985 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4986 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4987 4988 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4989 CU_ASSERT(nvme_ctrlr != NULL); 4990 4991 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4992 CU_ASSERT(bdev != NULL); 4993 4994 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4995 CU_ASSERT(nvme_ns != NULL); 4996 4997 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4998 ut_bdev_io_set_buf(bdev_io); 4999 5000 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 5001 5002 ch = spdk_get_io_channel(bdev); 5003 SPDK_CU_ASSERT_FATAL(ch != NULL); 5004 5005 nbdev_ch = spdk_io_channel_get_ctx(ch); 5006 5007 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5008 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5009 5010 nvme_qpair = io_path->qpair; 5011 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5012 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5013 5014 now = spdk_get_ticks(); 5015 5016 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 5017 5018 /* If I/O got ANA error, it should be queued, the corresponding namespace 5019 * should be freezed and its ANA state should be updated. 5020 */ 5021 bdev_io->internal.in_submit_request = true; 5022 5023 bdev_nvme_submit_request(ch, bdev_io); 5024 5025 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5026 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5027 5028 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 5029 SPDK_CU_ASSERT_FATAL(req != NULL); 5030 5031 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5032 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 5033 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5034 5035 poll_thread_times(0, 1); 5036 5037 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5038 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5039 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5040 /* I/O should be retried immediately. */ 5041 CU_ASSERT(bio->retry_ticks == now); 5042 CU_ASSERT(nvme_ns->ana_state_updating == true); 5043 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5044 5045 poll_threads(); 5046 5047 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5048 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5049 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5050 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5051 /* I/O should be retried after a second if no I/O path was found but 5052 * any I/O path may become available. 5053 */ 5054 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5055 5056 /* Namespace should be unfreezed after completing to update its ANA state. */ 5057 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5058 poll_threads(); 5059 5060 CU_ASSERT(nvme_ns->ana_state_updating == false); 5061 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5062 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5063 5064 /* Retry the queued I/O should succeed. */ 5065 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5066 poll_threads(); 5067 5068 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5069 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5070 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5071 5072 free(bdev_io); 5073 5074 spdk_put_io_channel(ch); 5075 5076 poll_threads(); 5077 5078 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5079 CU_ASSERT(rc == 0); 5080 5081 poll_threads(); 5082 spdk_delay_us(1000); 5083 poll_threads(); 5084 5085 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5086 5087 g_opts.bdev_retry_count = 0; 5088 } 5089 5090 static void 5091 test_check_io_error_resiliency_params(void) 5092 { 5093 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5094 * 3rd parameter is fast_io_fail_timeout_sec. 5095 */ 5096 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5097 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5098 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5099 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5100 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5101 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5102 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5103 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5104 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5105 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5106 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5107 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5108 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5109 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5110 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5111 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5112 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5113 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5114 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5115 } 5116 5117 static void 5118 test_retry_io_if_ctrlr_is_resetting(void) 5119 { 5120 struct nvme_path_id path = {}; 5121 struct nvme_ctrlr_opts opts = {}; 5122 struct spdk_nvme_ctrlr *ctrlr; 5123 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5124 struct nvme_ctrlr *nvme_ctrlr; 5125 const int STRING_SIZE = 32; 5126 const char *attached_names[STRING_SIZE]; 5127 struct nvme_bdev *bdev; 5128 struct nvme_ns *nvme_ns; 5129 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5130 struct spdk_io_channel *ch; 5131 struct nvme_bdev_channel *nbdev_ch; 5132 struct nvme_io_path *io_path; 5133 struct nvme_qpair *nvme_qpair; 5134 int rc; 5135 5136 g_opts.bdev_retry_count = 1; 5137 5138 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5139 ut_init_trid(&path.trid); 5140 5141 set_thread(0); 5142 5143 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5144 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5145 5146 g_ut_attach_ctrlr_status = 0; 5147 g_ut_attach_bdev_count = 1; 5148 5149 opts.ctrlr_loss_timeout_sec = -1; 5150 opts.reconnect_delay_sec = 1; 5151 5152 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5153 attach_ctrlr_done, NULL, NULL, &opts, false); 5154 CU_ASSERT(rc == 0); 5155 5156 spdk_delay_us(1000); 5157 poll_threads(); 5158 5159 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5160 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5161 5162 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5163 CU_ASSERT(nvme_ctrlr != NULL); 5164 5165 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5166 CU_ASSERT(bdev != NULL); 5167 5168 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5169 CU_ASSERT(nvme_ns != NULL); 5170 5171 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5172 ut_bdev_io_set_buf(bdev_io1); 5173 5174 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5175 ut_bdev_io_set_buf(bdev_io2); 5176 5177 ch = spdk_get_io_channel(bdev); 5178 SPDK_CU_ASSERT_FATAL(ch != NULL); 5179 5180 nbdev_ch = spdk_io_channel_get_ctx(ch); 5181 5182 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5183 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5184 5185 nvme_qpair = io_path->qpair; 5186 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5187 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5188 5189 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5190 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5191 5192 /* If qpair is connected, I/O should succeed. */ 5193 bdev_io1->internal.in_submit_request = true; 5194 5195 bdev_nvme_submit_request(ch, bdev_io1); 5196 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5197 5198 poll_threads(); 5199 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5200 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5201 5202 /* If qpair is disconnected, it is freed and then reconnected via resetting 5203 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5204 * while resetting the nvme_ctrlr. 5205 */ 5206 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5207 ctrlr->is_failed = true; 5208 5209 poll_thread_times(0, 5); 5210 5211 CU_ASSERT(nvme_qpair->qpair == NULL); 5212 CU_ASSERT(nvme_ctrlr->resetting == true); 5213 CU_ASSERT(ctrlr->is_failed == false); 5214 5215 bdev_io1->internal.in_submit_request = true; 5216 5217 bdev_nvme_submit_request(ch, bdev_io1); 5218 5219 spdk_delay_us(1); 5220 5221 bdev_io2->internal.in_submit_request = true; 5222 5223 bdev_nvme_submit_request(ch, bdev_io2); 5224 5225 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5226 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5227 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5228 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx( 5229 TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx, 5230 retry_link))); 5231 5232 poll_threads(); 5233 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5234 poll_threads(); 5235 5236 CU_ASSERT(nvme_qpair->qpair != NULL); 5237 CU_ASSERT(nvme_ctrlr->resetting == false); 5238 5239 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5240 5241 poll_thread_times(0, 1); 5242 5243 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5244 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5245 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5246 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5247 5248 poll_threads(); 5249 5250 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5251 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5252 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5253 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5254 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5255 5256 spdk_delay_us(1); 5257 5258 poll_thread_times(0, 1); 5259 5260 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5261 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5262 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5263 5264 poll_threads(); 5265 5266 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5267 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5268 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5269 5270 free(bdev_io1); 5271 free(bdev_io2); 5272 5273 spdk_put_io_channel(ch); 5274 5275 poll_threads(); 5276 5277 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5278 CU_ASSERT(rc == 0); 5279 5280 poll_threads(); 5281 spdk_delay_us(1000); 5282 poll_threads(); 5283 5284 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5285 5286 g_opts.bdev_retry_count = 0; 5287 } 5288 5289 static void 5290 test_reconnect_ctrlr(void) 5291 { 5292 struct spdk_nvme_transport_id trid = {}; 5293 struct spdk_nvme_ctrlr ctrlr = {}; 5294 struct nvme_ctrlr *nvme_ctrlr; 5295 struct spdk_io_channel *ch1, *ch2; 5296 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5297 int rc; 5298 5299 ut_init_trid(&trid); 5300 TAILQ_INIT(&ctrlr.active_io_qpairs); 5301 5302 set_thread(0); 5303 5304 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5305 CU_ASSERT(rc == 0); 5306 5307 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5308 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5309 5310 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5311 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5312 5313 ch1 = spdk_get_io_channel(nvme_ctrlr); 5314 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5315 5316 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5317 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5318 5319 set_thread(1); 5320 5321 ch2 = spdk_get_io_channel(nvme_ctrlr); 5322 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5323 5324 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5325 5326 /* Reset starts from thread 1. */ 5327 set_thread(1); 5328 5329 /* The reset should fail and a reconnect timer should be registered. */ 5330 ctrlr.fail_reset = true; 5331 ctrlr.is_failed = true; 5332 5333 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5334 CU_ASSERT(rc == 0); 5335 CU_ASSERT(nvme_ctrlr->resetting == true); 5336 CU_ASSERT(ctrlr.is_failed == true); 5337 5338 poll_threads(); 5339 5340 CU_ASSERT(nvme_ctrlr->resetting == false); 5341 CU_ASSERT(ctrlr.is_failed == false); 5342 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5343 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5344 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5345 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5346 5347 /* A new reset starts from thread 0. */ 5348 set_thread(1); 5349 5350 /* The reset should cancel the reconnect timer and should start from reconnection. 5351 * Then, the reset should fail and a reconnect timer should be registered again. 5352 */ 5353 ctrlr.fail_reset = true; 5354 ctrlr.is_failed = true; 5355 5356 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5357 CU_ASSERT(rc == 0); 5358 CU_ASSERT(nvme_ctrlr->resetting == true); 5359 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5360 CU_ASSERT(ctrlr.is_failed == true); 5361 5362 poll_threads(); 5363 5364 CU_ASSERT(nvme_ctrlr->resetting == false); 5365 CU_ASSERT(ctrlr.is_failed == false); 5366 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5367 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5368 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5369 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5370 5371 /* Then a reconnect retry should suceeed. */ 5372 ctrlr.fail_reset = false; 5373 5374 spdk_delay_us(SPDK_SEC_TO_USEC); 5375 poll_thread_times(0, 1); 5376 5377 CU_ASSERT(nvme_ctrlr->resetting == true); 5378 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5379 5380 poll_threads(); 5381 5382 CU_ASSERT(nvme_ctrlr->resetting == false); 5383 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5384 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5385 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5386 5387 /* The reset should fail and a reconnect timer should be registered. */ 5388 ctrlr.fail_reset = true; 5389 ctrlr.is_failed = true; 5390 5391 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5392 CU_ASSERT(rc == 0); 5393 CU_ASSERT(nvme_ctrlr->resetting == true); 5394 CU_ASSERT(ctrlr.is_failed == true); 5395 5396 poll_threads(); 5397 5398 CU_ASSERT(nvme_ctrlr->resetting == false); 5399 CU_ASSERT(ctrlr.is_failed == false); 5400 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5401 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5402 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5403 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5404 5405 /* Then a reconnect retry should still fail. */ 5406 spdk_delay_us(SPDK_SEC_TO_USEC); 5407 poll_thread_times(0, 1); 5408 5409 CU_ASSERT(nvme_ctrlr->resetting == true); 5410 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5411 5412 poll_threads(); 5413 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5414 poll_threads(); 5415 5416 CU_ASSERT(nvme_ctrlr->resetting == false); 5417 CU_ASSERT(ctrlr.is_failed == false); 5418 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5419 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5420 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5421 5422 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5423 spdk_delay_us(SPDK_SEC_TO_USEC); 5424 poll_threads(); 5425 5426 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5427 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5428 CU_ASSERT(nvme_ctrlr->destruct == true); 5429 5430 spdk_put_io_channel(ch2); 5431 5432 set_thread(0); 5433 5434 spdk_put_io_channel(ch1); 5435 5436 poll_threads(); 5437 spdk_delay_us(1000); 5438 poll_threads(); 5439 5440 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5441 } 5442 5443 static struct nvme_path_id * 5444 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5445 const struct spdk_nvme_transport_id *trid) 5446 { 5447 struct nvme_path_id *p; 5448 5449 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5450 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5451 break; 5452 } 5453 } 5454 5455 return p; 5456 } 5457 5458 static void 5459 test_retry_failover_ctrlr(void) 5460 { 5461 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5462 struct spdk_nvme_ctrlr ctrlr = {}; 5463 struct nvme_ctrlr *nvme_ctrlr = NULL; 5464 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5465 struct spdk_io_channel *ch; 5466 struct nvme_ctrlr_channel *ctrlr_ch; 5467 int rc; 5468 5469 ut_init_trid(&trid1); 5470 ut_init_trid2(&trid2); 5471 ut_init_trid3(&trid3); 5472 TAILQ_INIT(&ctrlr.active_io_qpairs); 5473 5474 set_thread(0); 5475 5476 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5477 CU_ASSERT(rc == 0); 5478 5479 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5480 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5481 5482 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5483 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5484 5485 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5486 CU_ASSERT(rc == 0); 5487 5488 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5489 CU_ASSERT(rc == 0); 5490 5491 ch = spdk_get_io_channel(nvme_ctrlr); 5492 SPDK_CU_ASSERT_FATAL(ch != NULL); 5493 5494 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5495 5496 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5497 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5498 CU_ASSERT(path_id1->last_failed_tsc == 0); 5499 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5500 5501 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5502 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5503 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5504 5505 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5506 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5507 5508 /* It is expected that connecting both of trid1, trid2, and trid3 fail, 5509 * and a reconnect timer is started. */ 5510 ctrlr.fail_reset = true; 5511 ctrlr.is_failed = true; 5512 5513 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5514 CU_ASSERT(rc == 0); 5515 5516 poll_threads(); 5517 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5518 poll_threads(); 5519 5520 CU_ASSERT(nvme_ctrlr->resetting == false); 5521 CU_ASSERT(ctrlr.is_failed == false); 5522 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5523 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5524 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5525 CU_ASSERT(path_id1->last_failed_tsc != 0); 5526 5527 CU_ASSERT(path_id2->last_failed_tsc != 0); 5528 CU_ASSERT(path_id3->last_failed_tsc != 0); 5529 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5530 5531 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5532 * switched to trid2 but reset is not started. 5533 */ 5534 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5535 CU_ASSERT(rc == -EALREADY); 5536 5537 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5538 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5539 5540 CU_ASSERT(nvme_ctrlr->resetting == false); 5541 5542 /* If reconnect succeeds, trid2 should be the active path_id */ 5543 ctrlr.fail_reset = false; 5544 5545 spdk_delay_us(SPDK_SEC_TO_USEC); 5546 poll_thread_times(0, 1); 5547 5548 CU_ASSERT(nvme_ctrlr->resetting == true); 5549 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5550 5551 poll_threads(); 5552 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5553 poll_threads(); 5554 5555 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5556 CU_ASSERT(path_id2->last_failed_tsc == 0); 5557 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5558 CU_ASSERT(nvme_ctrlr->resetting == false); 5559 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5560 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5561 5562 spdk_put_io_channel(ch); 5563 5564 poll_threads(); 5565 5566 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5567 CU_ASSERT(rc == 0); 5568 5569 poll_threads(); 5570 spdk_delay_us(1000); 5571 poll_threads(); 5572 5573 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5574 } 5575 5576 static void 5577 test_fail_path(void) 5578 { 5579 struct nvme_path_id path = {}; 5580 struct nvme_ctrlr_opts opts = {}; 5581 struct spdk_nvme_ctrlr *ctrlr; 5582 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5583 struct nvme_ctrlr *nvme_ctrlr; 5584 const int STRING_SIZE = 32; 5585 const char *attached_names[STRING_SIZE]; 5586 struct nvme_bdev *bdev; 5587 struct nvme_ns *nvme_ns; 5588 struct spdk_bdev_io *bdev_io; 5589 struct spdk_io_channel *ch; 5590 struct nvme_bdev_channel *nbdev_ch; 5591 struct nvme_io_path *io_path; 5592 struct nvme_ctrlr_channel *ctrlr_ch; 5593 int rc; 5594 5595 /* The test scenario is the following. 5596 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5597 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5598 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5599 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5600 * comes first. The queued I/O is failed. 5601 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5602 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5603 */ 5604 5605 g_opts.bdev_retry_count = 1; 5606 5607 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5608 ut_init_trid(&path.trid); 5609 5610 set_thread(0); 5611 5612 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5613 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5614 5615 g_ut_attach_ctrlr_status = 0; 5616 g_ut_attach_bdev_count = 1; 5617 5618 opts.ctrlr_loss_timeout_sec = 4; 5619 opts.reconnect_delay_sec = 1; 5620 opts.fast_io_fail_timeout_sec = 2; 5621 5622 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5623 attach_ctrlr_done, NULL, NULL, &opts, false); 5624 CU_ASSERT(rc == 0); 5625 5626 spdk_delay_us(1000); 5627 poll_threads(); 5628 5629 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5630 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5631 5632 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5633 CU_ASSERT(nvme_ctrlr != NULL); 5634 5635 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5636 CU_ASSERT(bdev != NULL); 5637 5638 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5639 CU_ASSERT(nvme_ns != NULL); 5640 5641 ch = spdk_get_io_channel(bdev); 5642 SPDK_CU_ASSERT_FATAL(ch != NULL); 5643 5644 nbdev_ch = spdk_io_channel_get_ctx(ch); 5645 5646 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5647 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5648 5649 ctrlr_ch = io_path->qpair->ctrlr_ch; 5650 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5651 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5652 5653 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5654 ut_bdev_io_set_buf(bdev_io); 5655 5656 5657 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5658 ctrlr->fail_reset = true; 5659 ctrlr->is_failed = true; 5660 5661 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5662 CU_ASSERT(rc == 0); 5663 CU_ASSERT(nvme_ctrlr->resetting == true); 5664 CU_ASSERT(ctrlr->is_failed == true); 5665 5666 poll_threads(); 5667 5668 CU_ASSERT(nvme_ctrlr->resetting == false); 5669 CU_ASSERT(ctrlr->is_failed == false); 5670 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5671 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5672 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5673 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5674 5675 /* I/O should be queued. */ 5676 bdev_io->internal.in_submit_request = true; 5677 5678 bdev_nvme_submit_request(ch, bdev_io); 5679 5680 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5681 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5682 5683 /* After a second, the I/O should be still queued and the ctrlr should be 5684 * still recovering. 5685 */ 5686 spdk_delay_us(SPDK_SEC_TO_USEC); 5687 poll_threads(); 5688 5689 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5690 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5691 5692 CU_ASSERT(nvme_ctrlr->resetting == false); 5693 CU_ASSERT(ctrlr->is_failed == false); 5694 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5695 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5696 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5697 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5698 5699 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5700 5701 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5702 spdk_delay_us(SPDK_SEC_TO_USEC); 5703 poll_threads(); 5704 5705 CU_ASSERT(nvme_ctrlr->resetting == false); 5706 CU_ASSERT(ctrlr->is_failed == false); 5707 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5708 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5709 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5710 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5711 5712 /* Then within a second, pending I/O should be failed. */ 5713 spdk_delay_us(SPDK_SEC_TO_USEC); 5714 poll_threads(); 5715 5716 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5717 poll_threads(); 5718 5719 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5720 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5721 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5722 5723 /* Another I/O submission should be failed immediately. */ 5724 bdev_io->internal.in_submit_request = true; 5725 5726 bdev_nvme_submit_request(ch, bdev_io); 5727 5728 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5729 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5730 5731 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5732 * be deleted. 5733 */ 5734 spdk_delay_us(SPDK_SEC_TO_USEC); 5735 poll_threads(); 5736 5737 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5738 poll_threads(); 5739 5740 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5741 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5742 CU_ASSERT(nvme_ctrlr->destruct == true); 5743 5744 spdk_put_io_channel(ch); 5745 5746 poll_threads(); 5747 spdk_delay_us(1000); 5748 poll_threads(); 5749 5750 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5751 5752 free(bdev_io); 5753 5754 g_opts.bdev_retry_count = 0; 5755 } 5756 5757 static void 5758 test_nvme_ns_cmp(void) 5759 { 5760 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5761 5762 nvme_ns1.id = 0; 5763 nvme_ns2.id = UINT32_MAX; 5764 5765 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5766 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5767 } 5768 5769 static void 5770 test_ana_transition(void) 5771 { 5772 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5773 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5774 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5775 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5776 5777 /* case 1: ANA transition timedout is canceled. */ 5778 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5779 nvme_ns.ana_transition_timedout = true; 5780 5781 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5782 5783 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5784 5785 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5786 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5787 5788 /* case 2: ANATT timer is kept. */ 5789 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5790 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5791 &nvme_ns, 5792 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5793 5794 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5795 5796 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5797 5798 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5799 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5800 5801 /* case 3: ANATT timer is stopped. */ 5802 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5803 5804 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5805 5806 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5807 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5808 5809 /* ANATT timer is started. */ 5810 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5811 5812 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5813 5814 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5815 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5816 5817 /* ANATT timer is expired. */ 5818 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5819 5820 poll_threads(); 5821 5822 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5823 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5824 } 5825 5826 static void 5827 _set_preferred_path_cb(void *cb_arg, int rc) 5828 { 5829 bool *done = cb_arg; 5830 5831 *done = true; 5832 } 5833 5834 static void 5835 test_set_preferred_path(void) 5836 { 5837 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5838 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5839 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5840 const int STRING_SIZE = 32; 5841 const char *attached_names[STRING_SIZE]; 5842 struct nvme_bdev *bdev; 5843 struct spdk_io_channel *ch; 5844 struct nvme_bdev_channel *nbdev_ch; 5845 struct nvme_io_path *io_path; 5846 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5847 const struct spdk_nvme_ctrlr_data *cdata; 5848 bool done; 5849 int rc; 5850 5851 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5852 ut_init_trid(&path1.trid); 5853 ut_init_trid2(&path2.trid); 5854 ut_init_trid3(&path3.trid); 5855 g_ut_attach_ctrlr_status = 0; 5856 g_ut_attach_bdev_count = 1; 5857 5858 set_thread(0); 5859 5860 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5861 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5862 5863 ctrlr1->ns[0].uuid = &uuid1; 5864 5865 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5866 attach_ctrlr_done, NULL, NULL, NULL, true); 5867 CU_ASSERT(rc == 0); 5868 5869 spdk_delay_us(1000); 5870 poll_threads(); 5871 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5872 poll_threads(); 5873 5874 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5875 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5876 5877 ctrlr2->ns[0].uuid = &uuid1; 5878 5879 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5880 attach_ctrlr_done, NULL, NULL, NULL, true); 5881 CU_ASSERT(rc == 0); 5882 5883 spdk_delay_us(1000); 5884 poll_threads(); 5885 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5886 poll_threads(); 5887 5888 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5889 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5890 5891 ctrlr3->ns[0].uuid = &uuid1; 5892 5893 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5894 attach_ctrlr_done, NULL, NULL, NULL, true); 5895 CU_ASSERT(rc == 0); 5896 5897 spdk_delay_us(1000); 5898 poll_threads(); 5899 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5900 poll_threads(); 5901 5902 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5903 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5904 5905 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5906 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5907 5908 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5909 5910 ch = spdk_get_io_channel(bdev); 5911 SPDK_CU_ASSERT_FATAL(ch != NULL); 5912 nbdev_ch = spdk_io_channel_get_ctx(ch); 5913 5914 io_path = bdev_nvme_find_io_path(nbdev_ch); 5915 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5916 5917 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5918 5919 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5920 * should return io_path to ctrlr2. 5921 */ 5922 5923 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5924 done = false; 5925 5926 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5927 5928 poll_threads(); 5929 CU_ASSERT(done == true); 5930 5931 io_path = bdev_nvme_find_io_path(nbdev_ch); 5932 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5933 5934 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5935 5936 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5937 * acquired, find_io_path() should return io_path to ctrlr3. 5938 */ 5939 5940 spdk_put_io_channel(ch); 5941 5942 poll_threads(); 5943 5944 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5945 done = false; 5946 5947 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5948 5949 poll_threads(); 5950 CU_ASSERT(done == true); 5951 5952 ch = spdk_get_io_channel(bdev); 5953 SPDK_CU_ASSERT_FATAL(ch != NULL); 5954 nbdev_ch = spdk_io_channel_get_ctx(ch); 5955 5956 io_path = bdev_nvme_find_io_path(nbdev_ch); 5957 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5958 5959 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 5960 5961 spdk_put_io_channel(ch); 5962 5963 poll_threads(); 5964 5965 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5966 CU_ASSERT(rc == 0); 5967 5968 poll_threads(); 5969 spdk_delay_us(1000); 5970 poll_threads(); 5971 5972 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5973 } 5974 5975 static void 5976 test_find_next_io_path(void) 5977 { 5978 struct nvme_bdev_channel nbdev_ch = { 5979 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 5980 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 5981 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 5982 }; 5983 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 5984 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 5985 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 5986 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 5987 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 5988 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 5989 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 5990 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 5991 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 5992 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 5993 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 5994 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 5995 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 5996 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 5997 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 5998 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 5999 6000 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6001 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6002 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6003 6004 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6005 * is covered in test_find_io_path. 6006 */ 6007 6008 nbdev_ch.current_io_path = &io_path2; 6009 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6010 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6011 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6012 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6013 6014 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6015 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6016 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6017 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6018 6019 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6020 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6021 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6022 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6023 6024 nbdev_ch.current_io_path = &io_path3; 6025 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6026 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6027 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6028 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6029 6030 /* Test if next io_path is selected according to rr_min_io */ 6031 6032 nbdev_ch.current_io_path = NULL; 6033 nbdev_ch.rr_min_io = 2; 6034 nbdev_ch.rr_counter = 0; 6035 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6036 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6037 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6038 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6039 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6040 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6041 6042 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6043 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6044 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6045 } 6046 6047 static void 6048 test_find_io_path_min_qd(void) 6049 { 6050 struct nvme_bdev_channel nbdev_ch = { 6051 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6052 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6053 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6054 }; 6055 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6056 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6057 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6058 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6059 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6060 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6061 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6062 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6063 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6064 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6065 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6066 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6067 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6068 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6069 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6070 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6071 6072 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6073 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6074 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6075 6076 /* Test if the minumum io_outstanding or the ANA optimized state is 6077 * prioritized when using least queue depth selector 6078 */ 6079 qpair1.num_outstanding_reqs = 2; 6080 qpair2.num_outstanding_reqs = 1; 6081 qpair3.num_outstanding_reqs = 0; 6082 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6083 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6084 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6085 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6086 6087 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6088 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6089 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6090 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6091 6092 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6093 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6094 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6095 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6096 6097 qpair2.num_outstanding_reqs = 4; 6098 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6099 } 6100 6101 static void 6102 test_disable_auto_failback(void) 6103 { 6104 struct nvme_path_id path1 = {}, path2 = {}; 6105 struct nvme_ctrlr_opts opts = {}; 6106 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6107 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6108 struct nvme_ctrlr *nvme_ctrlr1; 6109 const int STRING_SIZE = 32; 6110 const char *attached_names[STRING_SIZE]; 6111 struct nvme_bdev *bdev; 6112 struct spdk_io_channel *ch; 6113 struct nvme_bdev_channel *nbdev_ch; 6114 struct nvme_io_path *io_path; 6115 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6116 const struct spdk_nvme_ctrlr_data *cdata; 6117 bool done; 6118 int rc; 6119 6120 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6121 ut_init_trid(&path1.trid); 6122 ut_init_trid2(&path2.trid); 6123 g_ut_attach_ctrlr_status = 0; 6124 g_ut_attach_bdev_count = 1; 6125 6126 g_opts.disable_auto_failback = true; 6127 6128 opts.ctrlr_loss_timeout_sec = -1; 6129 opts.reconnect_delay_sec = 1; 6130 6131 set_thread(0); 6132 6133 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6134 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6135 6136 ctrlr1->ns[0].uuid = &uuid1; 6137 6138 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6139 attach_ctrlr_done, NULL, NULL, &opts, true); 6140 CU_ASSERT(rc == 0); 6141 6142 spdk_delay_us(1000); 6143 poll_threads(); 6144 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6145 poll_threads(); 6146 6147 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6148 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6149 6150 ctrlr2->ns[0].uuid = &uuid1; 6151 6152 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6153 attach_ctrlr_done, NULL, NULL, &opts, true); 6154 CU_ASSERT(rc == 0); 6155 6156 spdk_delay_us(1000); 6157 poll_threads(); 6158 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6159 poll_threads(); 6160 6161 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6162 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6163 6164 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6165 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6166 6167 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6168 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6169 6170 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6171 6172 ch = spdk_get_io_channel(bdev); 6173 SPDK_CU_ASSERT_FATAL(ch != NULL); 6174 nbdev_ch = spdk_io_channel_get_ctx(ch); 6175 6176 io_path = bdev_nvme_find_io_path(nbdev_ch); 6177 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6178 6179 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6180 6181 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6182 ctrlr1->fail_reset = true; 6183 ctrlr1->is_failed = true; 6184 6185 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6186 6187 poll_threads(); 6188 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6189 poll_threads(); 6190 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6191 poll_threads(); 6192 6193 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6194 6195 io_path = bdev_nvme_find_io_path(nbdev_ch); 6196 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6197 6198 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6199 6200 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6201 * Hence, io_path to ctrlr2 should still be used. 6202 */ 6203 ctrlr1->fail_reset = false; 6204 6205 spdk_delay_us(SPDK_SEC_TO_USEC); 6206 poll_threads(); 6207 6208 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6209 6210 io_path = bdev_nvme_find_io_path(nbdev_ch); 6211 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6212 6213 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6214 6215 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6216 * be used again. 6217 */ 6218 6219 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6220 done = false; 6221 6222 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6223 6224 poll_threads(); 6225 CU_ASSERT(done == true); 6226 6227 io_path = bdev_nvme_find_io_path(nbdev_ch); 6228 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6229 6230 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6231 6232 spdk_put_io_channel(ch); 6233 6234 poll_threads(); 6235 6236 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6237 CU_ASSERT(rc == 0); 6238 6239 poll_threads(); 6240 spdk_delay_us(1000); 6241 poll_threads(); 6242 6243 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6244 6245 g_opts.disable_auto_failback = false; 6246 } 6247 6248 static void 6249 ut_set_multipath_policy_done(void *cb_arg, int rc) 6250 { 6251 int *done = cb_arg; 6252 6253 SPDK_CU_ASSERT_FATAL(done != NULL); 6254 *done = rc; 6255 } 6256 6257 static void 6258 test_set_multipath_policy(void) 6259 { 6260 struct nvme_path_id path1 = {}, path2 = {}; 6261 struct nvme_ctrlr_opts opts = {}; 6262 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6263 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6264 const int STRING_SIZE = 32; 6265 const char *attached_names[STRING_SIZE]; 6266 struct nvme_bdev *bdev; 6267 struct spdk_io_channel *ch; 6268 struct nvme_bdev_channel *nbdev_ch; 6269 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6270 int done; 6271 int rc; 6272 6273 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6274 ut_init_trid(&path1.trid); 6275 ut_init_trid2(&path2.trid); 6276 g_ut_attach_ctrlr_status = 0; 6277 g_ut_attach_bdev_count = 1; 6278 6279 g_opts.disable_auto_failback = true; 6280 6281 opts.ctrlr_loss_timeout_sec = -1; 6282 opts.reconnect_delay_sec = 1; 6283 6284 set_thread(0); 6285 6286 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6287 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6288 6289 ctrlr1->ns[0].uuid = &uuid1; 6290 6291 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6292 attach_ctrlr_done, NULL, NULL, &opts, true); 6293 CU_ASSERT(rc == 0); 6294 6295 spdk_delay_us(1000); 6296 poll_threads(); 6297 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6298 poll_threads(); 6299 6300 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6301 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6302 6303 ctrlr2->ns[0].uuid = &uuid1; 6304 6305 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6306 attach_ctrlr_done, NULL, NULL, &opts, true); 6307 CU_ASSERT(rc == 0); 6308 6309 spdk_delay_us(1000); 6310 poll_threads(); 6311 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6312 poll_threads(); 6313 6314 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6315 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6316 6317 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6318 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6319 6320 /* If multipath policy is updated before getting any I/O channel, 6321 * an new I/O channel should have the update. 6322 */ 6323 done = -1; 6324 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6325 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6326 ut_set_multipath_policy_done, &done); 6327 poll_threads(); 6328 CU_ASSERT(done == 0); 6329 6330 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6331 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6332 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6333 6334 ch = spdk_get_io_channel(bdev); 6335 SPDK_CU_ASSERT_FATAL(ch != NULL); 6336 nbdev_ch = spdk_io_channel_get_ctx(ch); 6337 6338 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6339 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6340 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6341 6342 /* If multipath policy is updated while a I/O channel is active, 6343 * the update should be applied to the I/O channel immediately. 6344 */ 6345 done = -1; 6346 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6347 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6348 ut_set_multipath_policy_done, &done); 6349 poll_threads(); 6350 CU_ASSERT(done == 0); 6351 6352 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6353 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6354 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6355 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6356 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6357 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6358 6359 spdk_put_io_channel(ch); 6360 6361 poll_threads(); 6362 6363 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6364 CU_ASSERT(rc == 0); 6365 6366 poll_threads(); 6367 spdk_delay_us(1000); 6368 poll_threads(); 6369 6370 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6371 } 6372 6373 static void 6374 test_uuid_generation(void) 6375 { 6376 uint32_t nsid1 = 1, nsid2 = 2; 6377 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6378 char sn3[21] = " "; 6379 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6380 struct spdk_uuid uuid1, uuid2; 6381 int rc; 6382 6383 /* Test case 1: 6384 * Serial numbers are the same, nsids are different. 6385 * Compare two generated UUID - they should be different. */ 6386 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6387 CU_ASSERT(rc == 0); 6388 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6389 CU_ASSERT(rc == 0); 6390 6391 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6392 6393 /* Test case 2: 6394 * Serial numbers differ only by one character, nsids are the same. 6395 * Compare two generated UUID - they should be different. */ 6396 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6397 CU_ASSERT(rc == 0); 6398 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6399 CU_ASSERT(rc == 0); 6400 6401 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6402 6403 /* Test case 3: 6404 * Serial number comprises only of space characters. 6405 * Validate the generated UUID. */ 6406 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6407 CU_ASSERT(rc == 0); 6408 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6409 6410 } 6411 6412 static void 6413 test_retry_io_to_same_path(void) 6414 { 6415 struct nvme_path_id path1 = {}, path2 = {}; 6416 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6417 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6418 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6419 const int STRING_SIZE = 32; 6420 const char *attached_names[STRING_SIZE]; 6421 struct nvme_bdev *bdev; 6422 struct spdk_bdev_io *bdev_io; 6423 struct nvme_bdev_io *bio; 6424 struct spdk_io_channel *ch; 6425 struct nvme_bdev_channel *nbdev_ch; 6426 struct nvme_io_path *io_path1, *io_path2; 6427 struct ut_nvme_req *req; 6428 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6429 int done; 6430 int rc; 6431 6432 g_opts.nvme_ioq_poll_period_us = 1; 6433 6434 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6435 ut_init_trid(&path1.trid); 6436 ut_init_trid2(&path2.trid); 6437 g_ut_attach_ctrlr_status = 0; 6438 g_ut_attach_bdev_count = 1; 6439 6440 set_thread(0); 6441 6442 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6443 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6444 6445 ctrlr1->ns[0].uuid = &uuid1; 6446 6447 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6448 attach_ctrlr_done, NULL, NULL, NULL, true); 6449 CU_ASSERT(rc == 0); 6450 6451 spdk_delay_us(1000); 6452 poll_threads(); 6453 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6454 poll_threads(); 6455 6456 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6457 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6458 6459 ctrlr2->ns[0].uuid = &uuid1; 6460 6461 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6462 attach_ctrlr_done, NULL, NULL, NULL, true); 6463 CU_ASSERT(rc == 0); 6464 6465 spdk_delay_us(1000); 6466 poll_threads(); 6467 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6468 poll_threads(); 6469 6470 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6471 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6472 6473 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6474 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6475 6476 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 6477 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6478 6479 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6480 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6481 6482 done = -1; 6483 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6484 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6485 poll_threads(); 6486 CU_ASSERT(done == 0); 6487 6488 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6489 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6490 CU_ASSERT(bdev->rr_min_io == 1); 6491 6492 ch = spdk_get_io_channel(bdev); 6493 SPDK_CU_ASSERT_FATAL(ch != NULL); 6494 nbdev_ch = spdk_io_channel_get_ctx(ch); 6495 6496 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6497 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6498 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6499 6500 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6501 ut_bdev_io_set_buf(bdev_io); 6502 6503 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6504 6505 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6506 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6507 6508 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6509 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6510 6511 /* The 1st I/O should be submitted to io_path1. */ 6512 bdev_io->internal.in_submit_request = true; 6513 6514 bdev_nvme_submit_request(ch, bdev_io); 6515 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6516 CU_ASSERT(bio->io_path == io_path1); 6517 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6518 6519 spdk_delay_us(1); 6520 6521 poll_threads(); 6522 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6523 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6524 6525 /* The 2nd I/O should be submitted to io_path2 because the path selection 6526 * policy is round-robin. 6527 */ 6528 bdev_io->internal.in_submit_request = true; 6529 6530 bdev_nvme_submit_request(ch, bdev_io); 6531 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6532 CU_ASSERT(bio->io_path == io_path2); 6533 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6534 6535 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6536 SPDK_CU_ASSERT_FATAL(req != NULL); 6537 6538 /* Set retry count to non-zero. */ 6539 g_opts.bdev_retry_count = 2; 6540 6541 /* Inject an I/O error. */ 6542 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6543 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6544 6545 /* The 2nd I/O should be queued to nbdev_ch. */ 6546 spdk_delay_us(1); 6547 poll_thread_times(0, 1); 6548 6549 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6550 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6551 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6552 6553 /* The 2nd I/O should keep caching io_path2. */ 6554 CU_ASSERT(bio->io_path == io_path2); 6555 6556 /* The 2nd I/O should be submitted to io_path2 again. */ 6557 poll_thread_times(0, 1); 6558 6559 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6560 CU_ASSERT(bio->io_path == io_path2); 6561 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6562 6563 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6564 SPDK_CU_ASSERT_FATAL(req != NULL); 6565 6566 /* Inject an I/O error again. */ 6567 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6568 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6569 req->cpl.status.crd = 1; 6570 6571 ctrlr2->cdata.crdt[1] = 1; 6572 6573 /* The 2nd I/O should be queued to nbdev_ch. */ 6574 spdk_delay_us(1); 6575 poll_thread_times(0, 1); 6576 6577 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6578 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6579 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6580 6581 /* The 2nd I/O should keep caching io_path2. */ 6582 CU_ASSERT(bio->io_path == io_path2); 6583 6584 /* Detach ctrlr2 dynamically. */ 6585 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6586 CU_ASSERT(rc == 0); 6587 6588 spdk_delay_us(1000); 6589 poll_threads(); 6590 spdk_delay_us(1000); 6591 poll_threads(); 6592 spdk_delay_us(1000); 6593 poll_threads(); 6594 spdk_delay_us(1000); 6595 poll_threads(); 6596 6597 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 6598 6599 poll_threads(); 6600 spdk_delay_us(100000); 6601 poll_threads(); 6602 spdk_delay_us(1); 6603 poll_threads(); 6604 6605 /* The 2nd I/O should succeed by io_path1. */ 6606 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6607 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6608 CU_ASSERT(bio->io_path == io_path1); 6609 6610 free(bdev_io); 6611 6612 spdk_put_io_channel(ch); 6613 6614 poll_threads(); 6615 spdk_delay_us(1); 6616 poll_threads(); 6617 6618 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6619 CU_ASSERT(rc == 0); 6620 6621 poll_threads(); 6622 spdk_delay_us(1000); 6623 poll_threads(); 6624 6625 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6626 6627 g_opts.nvme_ioq_poll_period_us = 0; 6628 g_opts.bdev_retry_count = 0; 6629 } 6630 6631 /* This case is to verify a fix for a complex race condition that 6632 * failover is lost if fabric connect command gets timeout while 6633 * controller is being reset. 6634 */ 6635 static void 6636 test_race_between_reset_and_disconnected(void) 6637 { 6638 struct spdk_nvme_transport_id trid = {}; 6639 struct spdk_nvme_ctrlr ctrlr = {}; 6640 struct nvme_ctrlr *nvme_ctrlr = NULL; 6641 struct nvme_path_id *curr_trid; 6642 struct spdk_io_channel *ch1, *ch2; 6643 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6644 int rc; 6645 6646 ut_init_trid(&trid); 6647 TAILQ_INIT(&ctrlr.active_io_qpairs); 6648 6649 set_thread(0); 6650 6651 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6652 CU_ASSERT(rc == 0); 6653 6654 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6655 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6656 6657 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6658 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6659 6660 ch1 = spdk_get_io_channel(nvme_ctrlr); 6661 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6662 6663 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6664 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6665 6666 set_thread(1); 6667 6668 ch2 = spdk_get_io_channel(nvme_ctrlr); 6669 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6670 6671 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6672 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6673 6674 /* Reset starts from thread 1. */ 6675 set_thread(1); 6676 6677 nvme_ctrlr->resetting = false; 6678 curr_trid->last_failed_tsc = spdk_get_ticks(); 6679 ctrlr.is_failed = true; 6680 6681 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6682 CU_ASSERT(rc == 0); 6683 CU_ASSERT(nvme_ctrlr->resetting == true); 6684 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6685 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6686 6687 poll_thread_times(0, 3); 6688 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6689 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6690 6691 poll_thread_times(0, 1); 6692 poll_thread_times(1, 1); 6693 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6694 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6695 CU_ASSERT(ctrlr.is_failed == true); 6696 6697 poll_thread_times(1, 1); 6698 poll_thread_times(0, 1); 6699 CU_ASSERT(ctrlr.is_failed == false); 6700 CU_ASSERT(ctrlr.adminq.is_connected == false); 6701 6702 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6703 poll_thread_times(0, 2); 6704 CU_ASSERT(ctrlr.adminq.is_connected == true); 6705 6706 poll_thread_times(0, 1); 6707 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6708 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6709 6710 poll_thread_times(1, 1); 6711 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6712 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6713 CU_ASSERT(nvme_ctrlr->resetting == true); 6714 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6715 6716 poll_thread_times(0, 2); 6717 CU_ASSERT(nvme_ctrlr->resetting == true); 6718 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6719 poll_thread_times(1, 1); 6720 CU_ASSERT(nvme_ctrlr->resetting == true); 6721 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6722 6723 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6724 * 6725 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric 6726 * connect command is executed. If fabric connect command gets timeout, 6727 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6728 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6729 * 6730 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 6731 */ 6732 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6733 CU_ASSERT(rc == -EINPROGRESS); 6734 CU_ASSERT(nvme_ctrlr->resetting == true); 6735 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6736 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6737 6738 poll_thread_times(0, 1); 6739 6740 CU_ASSERT(nvme_ctrlr->resetting == true); 6741 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6742 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6743 6744 poll_threads(); 6745 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6746 poll_threads(); 6747 6748 CU_ASSERT(nvme_ctrlr->resetting == false); 6749 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6750 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6751 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6752 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6753 6754 spdk_put_io_channel(ch2); 6755 6756 set_thread(0); 6757 6758 spdk_put_io_channel(ch1); 6759 6760 poll_threads(); 6761 6762 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6763 CU_ASSERT(rc == 0); 6764 6765 poll_threads(); 6766 spdk_delay_us(1000); 6767 poll_threads(); 6768 6769 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6770 } 6771 static void 6772 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6773 { 6774 int *_rc = (int *)cb_arg; 6775 6776 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6777 *_rc = rc; 6778 } 6779 6780 static void 6781 test_ctrlr_op_rpc(void) 6782 { 6783 struct spdk_nvme_transport_id trid = {}; 6784 struct spdk_nvme_ctrlr ctrlr = {}; 6785 struct nvme_ctrlr *nvme_ctrlr = NULL; 6786 struct nvme_path_id *curr_trid; 6787 struct spdk_io_channel *ch1, *ch2; 6788 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6789 int ctrlr_op_rc; 6790 int rc; 6791 6792 ut_init_trid(&trid); 6793 TAILQ_INIT(&ctrlr.active_io_qpairs); 6794 6795 set_thread(0); 6796 6797 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6798 CU_ASSERT(rc == 0); 6799 6800 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6801 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6802 6803 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6804 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6805 6806 ch1 = spdk_get_io_channel(nvme_ctrlr); 6807 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6808 6809 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6810 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6811 6812 set_thread(1); 6813 6814 ch2 = spdk_get_io_channel(nvme_ctrlr); 6815 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6816 6817 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6818 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6819 6820 /* Reset starts from thread 1. */ 6821 set_thread(1); 6822 6823 /* Case 1: ctrlr is already being destructed. */ 6824 nvme_ctrlr->destruct = true; 6825 ctrlr_op_rc = 0; 6826 6827 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6828 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6829 6830 poll_threads(); 6831 6832 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6833 6834 /* Case 2: reset is in progress. */ 6835 nvme_ctrlr->destruct = false; 6836 nvme_ctrlr->resetting = true; 6837 ctrlr_op_rc = 0; 6838 6839 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6840 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6841 6842 poll_threads(); 6843 6844 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6845 6846 /* Case 3: reset completes successfully. */ 6847 nvme_ctrlr->resetting = false; 6848 curr_trid->last_failed_tsc = spdk_get_ticks(); 6849 ctrlr.is_failed = true; 6850 ctrlr_op_rc = -1; 6851 6852 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6853 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6854 6855 CU_ASSERT(nvme_ctrlr->resetting == true); 6856 CU_ASSERT(ctrlr_op_rc == -1); 6857 6858 poll_threads(); 6859 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6860 poll_threads(); 6861 6862 CU_ASSERT(nvme_ctrlr->resetting == false); 6863 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6864 CU_ASSERT(ctrlr.is_failed == false); 6865 CU_ASSERT(ctrlr_op_rc == 0); 6866 6867 /* Case 4: invalid operation. */ 6868 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6869 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6870 6871 poll_threads(); 6872 6873 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6874 6875 spdk_put_io_channel(ch2); 6876 6877 set_thread(0); 6878 6879 spdk_put_io_channel(ch1); 6880 6881 poll_threads(); 6882 6883 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6884 CU_ASSERT(rc == 0); 6885 6886 poll_threads(); 6887 spdk_delay_us(1000); 6888 poll_threads(); 6889 6890 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6891 } 6892 6893 static void 6894 test_bdev_ctrlr_op_rpc(void) 6895 { 6896 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6897 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6898 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6899 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6900 struct nvme_path_id *curr_trid1, *curr_trid2; 6901 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6902 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6903 int ctrlr_op_rc; 6904 int rc; 6905 6906 ut_init_trid(&trid1); 6907 ut_init_trid2(&trid2); 6908 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6909 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6910 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6911 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6912 ctrlr1.cdata.cntlid = 1; 6913 ctrlr2.cdata.cntlid = 2; 6914 ctrlr1.adminq.is_connected = true; 6915 ctrlr2.adminq.is_connected = true; 6916 6917 set_thread(0); 6918 6919 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6920 CU_ASSERT(rc == 0); 6921 6922 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6923 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6924 6925 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1); 6926 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6927 6928 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6929 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6930 6931 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6932 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6933 6934 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6935 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6936 6937 set_thread(1); 6938 6939 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6940 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6941 6942 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6943 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6944 6945 set_thread(0); 6946 6947 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6948 CU_ASSERT(rc == 0); 6949 6950 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2); 6951 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6952 6953 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 6954 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 6955 6956 ch21 = spdk_get_io_channel(nvme_ctrlr2); 6957 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 6958 6959 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 6960 CU_ASSERT(ctrlr_ch21->qpair != NULL); 6961 6962 set_thread(1); 6963 6964 ch22 = spdk_get_io_channel(nvme_ctrlr2); 6965 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 6966 6967 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 6968 CU_ASSERT(ctrlr_ch22->qpair != NULL); 6969 6970 /* Reset starts from thread 1. */ 6971 set_thread(1); 6972 6973 nvme_ctrlr1->resetting = false; 6974 nvme_ctrlr2->resetting = false; 6975 curr_trid1->last_failed_tsc = spdk_get_ticks(); 6976 curr_trid2->last_failed_tsc = spdk_get_ticks(); 6977 ctrlr_op_rc = -1; 6978 6979 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 6980 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6981 6982 CU_ASSERT(nvme_ctrlr1->resetting == true); 6983 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6984 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6985 CU_ASSERT(nvme_ctrlr2->resetting == false); 6986 6987 poll_thread_times(0, 3); 6988 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6989 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 6990 6991 poll_thread_times(0, 1); 6992 poll_thread_times(1, 1); 6993 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6994 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 6995 6996 poll_thread_times(1, 1); 6997 poll_thread_times(0, 1); 6998 CU_ASSERT(ctrlr1.adminq.is_connected == false); 6999 7000 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7001 poll_thread_times(0, 2); 7002 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7003 7004 poll_thread_times(0, 1); 7005 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7006 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7007 7008 poll_thread_times(1, 1); 7009 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7010 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7011 CU_ASSERT(nvme_ctrlr1->resetting == true); 7012 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7013 7014 poll_thread_times(0, 2); 7015 poll_thread_times(1, 1); 7016 poll_thread_times(0, 1); 7017 poll_thread_times(1, 1); 7018 poll_thread_times(0, 1); 7019 poll_thread_times(1, 1); 7020 poll_thread_times(0, 1); 7021 7022 CU_ASSERT(nvme_ctrlr1->resetting == false); 7023 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7024 CU_ASSERT(nvme_ctrlr2->resetting == true); 7025 7026 poll_threads(); 7027 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7028 poll_threads(); 7029 7030 CU_ASSERT(nvme_ctrlr2->resetting == false); 7031 CU_ASSERT(ctrlr_op_rc == 0); 7032 7033 set_thread(1); 7034 7035 spdk_put_io_channel(ch12); 7036 spdk_put_io_channel(ch22); 7037 7038 set_thread(0); 7039 7040 spdk_put_io_channel(ch11); 7041 spdk_put_io_channel(ch21); 7042 7043 poll_threads(); 7044 7045 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7046 CU_ASSERT(rc == 0); 7047 7048 poll_threads(); 7049 spdk_delay_us(1000); 7050 poll_threads(); 7051 7052 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7053 } 7054 7055 static void 7056 test_disable_enable_ctrlr(void) 7057 { 7058 struct spdk_nvme_transport_id trid = {}; 7059 struct spdk_nvme_ctrlr ctrlr = {}; 7060 struct nvme_ctrlr *nvme_ctrlr = NULL; 7061 struct nvme_path_id *curr_trid; 7062 struct spdk_io_channel *ch1, *ch2; 7063 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7064 int rc; 7065 7066 ut_init_trid(&trid); 7067 TAILQ_INIT(&ctrlr.active_io_qpairs); 7068 ctrlr.adminq.is_connected = true; 7069 7070 set_thread(0); 7071 7072 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7073 CU_ASSERT(rc == 0); 7074 7075 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7076 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7077 7078 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7079 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7080 7081 ch1 = spdk_get_io_channel(nvme_ctrlr); 7082 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7083 7084 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7085 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7086 7087 set_thread(1); 7088 7089 ch2 = spdk_get_io_channel(nvme_ctrlr); 7090 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7091 7092 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7093 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7094 7095 /* Disable starts from thread 1. */ 7096 set_thread(1); 7097 7098 /* Case 1: ctrlr is already disabled. */ 7099 nvme_ctrlr->disabled = true; 7100 7101 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7102 CU_ASSERT(rc == -EALREADY); 7103 7104 /* Case 2: ctrlr is already being destructed. */ 7105 nvme_ctrlr->disabled = false; 7106 nvme_ctrlr->destruct = true; 7107 7108 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7109 CU_ASSERT(rc == -ENXIO); 7110 7111 /* Case 3: reset is in progress. */ 7112 nvme_ctrlr->destruct = false; 7113 nvme_ctrlr->resetting = true; 7114 7115 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7116 CU_ASSERT(rc == -EBUSY); 7117 7118 /* Case 4: disable completes successfully. */ 7119 nvme_ctrlr->resetting = false; 7120 7121 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7122 CU_ASSERT(rc == 0); 7123 CU_ASSERT(nvme_ctrlr->resetting == true); 7124 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7125 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7126 7127 poll_thread_times(0, 3); 7128 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7129 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7130 7131 poll_thread_times(0, 1); 7132 poll_thread_times(1, 1); 7133 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7134 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7135 7136 poll_thread_times(1, 1); 7137 poll_thread_times(0, 1); 7138 CU_ASSERT(ctrlr.adminq.is_connected == false); 7139 poll_thread_times(1, 1); 7140 poll_thread_times(0, 1); 7141 poll_thread_times(1, 1); 7142 poll_thread_times(0, 1); 7143 CU_ASSERT(nvme_ctrlr->resetting == false); 7144 CU_ASSERT(nvme_ctrlr->disabled == true); 7145 7146 /* Case 5: enable completes successfully. */ 7147 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7148 CU_ASSERT(rc == 0); 7149 7150 CU_ASSERT(nvme_ctrlr->resetting == true); 7151 CU_ASSERT(nvme_ctrlr->disabled == false); 7152 7153 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7154 poll_thread_times(0, 2); 7155 CU_ASSERT(ctrlr.adminq.is_connected == true); 7156 7157 poll_thread_times(0, 1); 7158 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7159 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7160 7161 poll_thread_times(1, 1); 7162 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7163 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7164 CU_ASSERT(nvme_ctrlr->resetting == true); 7165 7166 poll_thread_times(0, 2); 7167 CU_ASSERT(nvme_ctrlr->resetting == true); 7168 poll_thread_times(1, 1); 7169 CU_ASSERT(nvme_ctrlr->resetting == true); 7170 poll_thread_times(0, 1); 7171 CU_ASSERT(nvme_ctrlr->resetting == false); 7172 7173 /* Case 6: ctrlr is already enabled. */ 7174 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7175 CU_ASSERT(rc == -EALREADY); 7176 7177 set_thread(0); 7178 7179 /* Case 7: disable cancels delayed reconnect. */ 7180 nvme_ctrlr->opts.reconnect_delay_sec = 10; 7181 ctrlr.fail_reset = true; 7182 7183 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7184 CU_ASSERT(rc == 0); 7185 7186 poll_threads(); 7187 7188 CU_ASSERT(nvme_ctrlr->resetting == false); 7189 CU_ASSERT(ctrlr.is_failed == false); 7190 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7191 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7192 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 7193 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 7194 7195 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7196 CU_ASSERT(rc == 0); 7197 7198 CU_ASSERT(nvme_ctrlr->resetting == true); 7199 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 7200 7201 poll_threads(); 7202 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7203 poll_threads(); 7204 7205 CU_ASSERT(nvme_ctrlr->resetting == false); 7206 CU_ASSERT(nvme_ctrlr->disabled == true); 7207 7208 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7209 CU_ASSERT(rc == 0); 7210 7211 CU_ASSERT(nvme_ctrlr->resetting == true); 7212 CU_ASSERT(nvme_ctrlr->disabled == false); 7213 7214 poll_threads(); 7215 7216 CU_ASSERT(nvme_ctrlr->resetting == false); 7217 7218 set_thread(1); 7219 7220 spdk_put_io_channel(ch2); 7221 7222 set_thread(0); 7223 7224 spdk_put_io_channel(ch1); 7225 7226 poll_threads(); 7227 7228 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7229 CU_ASSERT(rc == 0); 7230 7231 poll_threads(); 7232 spdk_delay_us(1000); 7233 poll_threads(); 7234 7235 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7236 } 7237 7238 static void 7239 ut_delete_done(void *ctx, int rc) 7240 { 7241 int *delete_done_rc = ctx; 7242 *delete_done_rc = rc; 7243 } 7244 7245 static void 7246 test_delete_ctrlr_done(void) 7247 { 7248 struct spdk_nvme_transport_id trid = {}; 7249 struct spdk_nvme_ctrlr ctrlr = {}; 7250 int delete_done_rc = 0xDEADBEEF; 7251 int rc; 7252 7253 ut_init_trid(&trid); 7254 7255 nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7256 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 7257 7258 rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc); 7259 CU_ASSERT(rc == 0); 7260 7261 for (int i = 0; i < 20; i++) { 7262 poll_threads(); 7263 if (delete_done_rc == 0) { 7264 break; 7265 } 7266 spdk_delay_us(1000); 7267 } 7268 7269 CU_ASSERT(delete_done_rc == 0); 7270 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7271 } 7272 7273 static void 7274 test_ns_remove_during_reset(void) 7275 { 7276 struct nvme_path_id path = {}; 7277 struct nvme_ctrlr_opts opts = {}; 7278 struct spdk_nvme_ctrlr *ctrlr; 7279 struct nvme_bdev_ctrlr *nbdev_ctrlr; 7280 struct nvme_ctrlr *nvme_ctrlr; 7281 const int STRING_SIZE = 32; 7282 const char *attached_names[STRING_SIZE]; 7283 struct nvme_bdev *bdev; 7284 struct nvme_ns *nvme_ns; 7285 union spdk_nvme_async_event_completion event = {}; 7286 struct spdk_nvme_cpl cpl = {}; 7287 int rc; 7288 7289 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 7290 ut_init_trid(&path.trid); 7291 7292 set_thread(0); 7293 7294 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 7295 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 7296 7297 g_ut_attach_ctrlr_status = 0; 7298 g_ut_attach_bdev_count = 1; 7299 7300 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 7301 attach_ctrlr_done, NULL, NULL, &opts, false); 7302 CU_ASSERT(rc == 0); 7303 7304 spdk_delay_us(1000); 7305 poll_threads(); 7306 7307 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7308 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7309 7310 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 7311 CU_ASSERT(nvme_ctrlr != NULL); 7312 7313 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 7314 CU_ASSERT(bdev != NULL); 7315 7316 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 7317 CU_ASSERT(nvme_ns != NULL); 7318 7319 /* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist, 7320 * but nvme_ns->ns should be NULL. 7321 */ 7322 7323 CU_ASSERT(ctrlr->ns[0].is_active == true); 7324 ctrlr->ns[0].is_active = false; 7325 7326 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7327 CU_ASSERT(rc == 0); 7328 7329 poll_threads(); 7330 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7331 poll_threads(); 7332 7333 CU_ASSERT(nvme_ctrlr->resetting == false); 7334 CU_ASSERT(ctrlr->adminq.is_connected == true); 7335 7336 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7337 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7338 CU_ASSERT(nvme_ns->bdev == bdev); 7339 CU_ASSERT(nvme_ns->ns == NULL); 7340 7341 /* Then, async event should fill nvme_ns->ns again. */ 7342 7343 ctrlr->ns[0].is_active = true; 7344 7345 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 7346 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 7347 cpl.cdw0 = event.raw; 7348 7349 aer_cb(nvme_ctrlr, &cpl); 7350 7351 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7352 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7353 CU_ASSERT(nvme_ns->bdev == bdev); 7354 CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]); 7355 7356 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7357 CU_ASSERT(rc == 0); 7358 7359 poll_threads(); 7360 spdk_delay_us(1000); 7361 poll_threads(); 7362 7363 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7364 } 7365 7366 int 7367 main(int argc, char **argv) 7368 { 7369 CU_pSuite suite = NULL; 7370 unsigned int num_failures; 7371 7372 CU_initialize_registry(); 7373 7374 suite = CU_add_suite("nvme", NULL, NULL); 7375 7376 CU_ADD_TEST(suite, test_create_ctrlr); 7377 CU_ADD_TEST(suite, test_reset_ctrlr); 7378 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 7379 CU_ADD_TEST(suite, test_failover_ctrlr); 7380 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 7381 CU_ADD_TEST(suite, test_pending_reset); 7382 CU_ADD_TEST(suite, test_attach_ctrlr); 7383 CU_ADD_TEST(suite, test_aer_cb); 7384 CU_ADD_TEST(suite, test_submit_nvme_cmd); 7385 CU_ADD_TEST(suite, test_add_remove_trid); 7386 CU_ADD_TEST(suite, test_abort); 7387 CU_ADD_TEST(suite, test_get_io_qpair); 7388 CU_ADD_TEST(suite, test_bdev_unregister); 7389 CU_ADD_TEST(suite, test_compare_ns); 7390 CU_ADD_TEST(suite, test_init_ana_log_page); 7391 CU_ADD_TEST(suite, test_get_memory_domains); 7392 CU_ADD_TEST(suite, test_reconnect_qpair); 7393 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 7394 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 7395 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 7396 CU_ADD_TEST(suite, test_admin_path); 7397 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 7398 CU_ADD_TEST(suite, test_find_io_path); 7399 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 7400 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 7401 CU_ADD_TEST(suite, test_retry_io_count); 7402 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 7403 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 7404 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 7405 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 7406 CU_ADD_TEST(suite, test_reconnect_ctrlr); 7407 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 7408 CU_ADD_TEST(suite, test_fail_path); 7409 CU_ADD_TEST(suite, test_nvme_ns_cmp); 7410 CU_ADD_TEST(suite, test_ana_transition); 7411 CU_ADD_TEST(suite, test_set_preferred_path); 7412 CU_ADD_TEST(suite, test_find_next_io_path); 7413 CU_ADD_TEST(suite, test_find_io_path_min_qd); 7414 CU_ADD_TEST(suite, test_disable_auto_failback); 7415 CU_ADD_TEST(suite, test_set_multipath_policy); 7416 CU_ADD_TEST(suite, test_uuid_generation); 7417 CU_ADD_TEST(suite, test_retry_io_to_same_path); 7418 CU_ADD_TEST(suite, test_race_between_reset_and_disconnected); 7419 CU_ADD_TEST(suite, test_ctrlr_op_rpc); 7420 CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc); 7421 CU_ADD_TEST(suite, test_disable_enable_ctrlr); 7422 CU_ADD_TEST(suite, test_delete_ctrlr_done); 7423 CU_ADD_TEST(suite, test_ns_remove_during_reset); 7424 7425 allocate_threads(3); 7426 set_thread(0); 7427 bdev_nvme_library_init(); 7428 init_accel(); 7429 7430 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7431 7432 set_thread(0); 7433 bdev_nvme_library_fini(); 7434 fini_accel(); 7435 free_threads(); 7436 7437 CU_cleanup_registry(); 7438 7439 return num_failures; 7440 } 7441