/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
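/*
 * A note on the mock plumbing (a sketch from spdk_internal/mock.h, not a
 * definitive reference): DEFINE_RETURN_MOCK(fn, ret) above declares a global
 * "ut_" variable (ut_spdk_nvme_ctrlr_get_memory_domains here) that a test can
 * set, e.g. via MOCK_SET(), and HANDLE_RETURN_MOCK(fn) in the hand-written
 * mock below returns that configured value. Roughly:
 *
 *	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
 *	rc = spdk_nvme_ctrlr_get_memory_domains(ctrlr, domains, 1);
 *
 * which makes the mock fill in one dummy domain pointer and return 1.
 */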
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int,
	    (struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
	     struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
	     spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);
DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

struct ut_nvme_req {
	uint16_t opc;
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	struct spdk_nvme_cpl cpl;
	TAILQ_ENTRY(ut_nvme_req) tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t id;
	bool is_active;
	struct spdk_uuid *uuid;
	enum spdk_nvme_ana_state ana_state;
	enum spdk_nvme_csi csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr *ctrlr;
	uint8_t failure_reason;
	bool is_connected;
	bool in_completion_context;
	bool delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req) outstanding_reqs;
	uint32_t num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq;
	struct spdk_nvme_poll_group *poll_group;
	void *poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t num_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_qpair adminq;
	struct spdk_nvme_ctrlr_data cdata;
	bool attached;
	bool is_failed;
	bool fail_reset;
	bool is_removed;
	struct spdk_nvme_transport_id trid;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
	struct spdk_nvme_ctrlr_opts opts;
};

struct spdk_nvme_poll_group {
	void *ctx;
	struct spdk_nvme_accel_fn_table accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;
	void *cb_ctx;
	spdk_nvme_attach_cb attach_cb;
	struct spdk_nvme_ctrlr *init_ctrlr;
};
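/*
 * The struct definitions above are deliberate test doubles: the real driver
 * types are opaque to bdev_nvme.c, so this unit test supplies its own minimal
 * layouts. Fields such as fail_reset, is_removed and failure_reason let a
 * test inject controller/qpair failures, and outstanding_reqs queues every
 * submitted command until the test pumps completions.
 */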
uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}
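/*
 * ut_attach_ctrlr() below fabricates a controller and parks it on
 * g_ut_init_ctrlrs; it only becomes "attached" once a test drives the mocked
 * probe path (spdk_nvme_probe_poll_async() moves it to g_ut_attached_ctrlrs).
 * The num_ns/ana_reporting/multipath arguments shape cdata and the per-ns
 * data so multipath and ANA tests can model shared namespaces.
 */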
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}
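/*
 * The two functions below mock the async probe flow: nvme_ctrlr_poll_internal()
 * "attaches" a fabricated controller (moving it to g_ut_attached_ctrlrs and
 * firing attach_cb), and spdk_nvme_probe_poll_async() matches controllers on
 * g_ut_init_ctrlrs against the probe ctx's trid. A failed controller is simply
 * freed, which is how tests simulate an attach failure.
 */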
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}
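/*
 * Mocked poll-group bookkeeping: a qpair lives on exactly one of the group's
 * connected_qpairs/disconnected_qpairs lists, and poll_group_tailq_head
 * records which one so the helpers below (and the CU_ASSERTs in
 * poll_group_add/remove) can verify every state transition.
 */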
static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}
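/*
 * ANA log page layout produced by ut_create_ana_log_page(): a single
 * spdk_nvme_ana_page header followed by one descriptor per *active*
 * namespace. Each descriptor here carries exactly one NSID, which is why
 * UT_ANA_DESC_SIZE adds sizeof(uint32_t) to the descriptor header:
 *
 *	[ana_hdr][desc(grp 1) + nsid 1][desc(grp 2) + nsid 2]...
 */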
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}
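/*
 * Every namespace I/O command mock below simply records the request via
 * ut_submit_nvme_request(); nothing completes until a test calls
 * spdk_nvme_qpair_process_completions() (or polls the admin queue), which
 * gives tests precise control over completion ordering. The g_ut_*_called
 * flags let tests assert which ext-API variant the bdev layer picked.
 */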
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}
int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}
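/*
 * The poll-group completion mock below mirrors the real driver's contract:
 * disconnected qpairs are reported through disconnected_qpair_cb, a qpair
 * with a failure reason is disconnected on the spot (and counted as "busy"
 * work), and the first negative per-qpair return value is propagated as the
 * overall error.
 */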
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}
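/*
 * test_reset_ctrlr() below walks the reset state machine one poller step at a
 * time with poll_thread_times(): destroy the I/O qpair on each thread,
 * disconnect and reconnect the admin qpair, recreate the I/O qpairs, then
 * clear last_failed_tsc and the resetting flag. The exact step counts track
 * the message-passing hops bdev_nvme makes between the two threads.
 */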
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling called spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was
 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1
 * to trid2. While processing the failed reset, trid3 was added. trid1 should
 * have been active, i.e., the head of the list until the failover completed.
 * However trid3 was inserted to the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection
 * is broken, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error invokes reset ctrlr and an admin qpair error invokes failover ctrlr.
 * Hence reset ctrlr may be executed repeatedly before failover is executed, and this
 * bug is real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}
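/*
 * test_pending_reset() below exercises reset queueing: while a reset is in
 * flight, a second reset bdev_io submitted from another channel is parked on
 * that channel's ctrlr_ch->pending_resets list and is completed (with the
 * same outcome as the first reset) once the in-flight reset finishes.
 */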
nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1899 int rc; 1900 1901 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1902 ut_init_trid(&trid); 1903 1904 set_thread(0); 1905 1906 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1907 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1908 1909 g_ut_attach_ctrlr_status = 0; 1910 g_ut_attach_bdev_count = 1; 1911 1912 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1913 attach_ctrlr_done, NULL, NULL, NULL, false); 1914 CU_ASSERT(rc == 0); 1915 1916 spdk_delay_us(1000); 1917 poll_threads(); 1918 1919 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1920 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1921 1922 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1923 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1924 1925 ch1 = spdk_get_io_channel(bdev); 1926 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1927 1928 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1929 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1930 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1931 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1932 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1933 1934 set_thread(1); 1935 1936 ch2 = spdk_get_io_channel(bdev); 1937 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1938 1939 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1940 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1941 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1942 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1943 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1944 1945 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1946 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1947 1948 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1949 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1950 1951 /* The first reset request is submitted on thread 1, and the second reset request 1952 * is submitted on thread 0 while processing the first request. 1953 */ 1954 bdev_nvme_submit_request(ch2, first_bdev_io); 1955 CU_ASSERT(nvme_ctrlr->resetting == true); 1956 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1957 1958 set_thread(0); 1959 1960 bdev_nvme_submit_request(ch1, second_bdev_io); 1961 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1962 1963 poll_threads(); 1964 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1965 poll_threads(); 1966 1967 CU_ASSERT(nvme_ctrlr->resetting == false); 1968 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1969 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1970 1971 /* The first reset request is submitted on thread 1, and the second reset request 1972 * is submitted on thread 0 while processing the first request. 1973 * 1974 * The difference from the above scenario is that the controller is removed while 1975 * processing the first request. Hence both reset requests should fail. 
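 * (In this unit test the removal is emulated by setting ctrlr->fail_reset = true
 * below, which makes the reconnect attempt fail and completes the queued second
 * reset with failure as well.)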
1976 */ 1977 set_thread(1); 1978 1979 bdev_nvme_submit_request(ch2, first_bdev_io); 1980 CU_ASSERT(nvme_ctrlr->resetting == true); 1981 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1982 1983 set_thread(0); 1984 1985 bdev_nvme_submit_request(ch1, second_bdev_io); 1986 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1987 1988 ctrlr->fail_reset = true; 1989 1990 poll_threads(); 1991 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1992 poll_threads(); 1993 1994 CU_ASSERT(nvme_ctrlr->resetting == false); 1995 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1996 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1997 1998 spdk_put_io_channel(ch1); 1999 2000 set_thread(1); 2001 2002 spdk_put_io_channel(ch2); 2003 2004 poll_threads(); 2005 2006 set_thread(0); 2007 2008 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2009 CU_ASSERT(rc == 0); 2010 2011 poll_threads(); 2012 spdk_delay_us(1000); 2013 poll_threads(); 2014 2015 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2016 2017 free(first_bdev_io); 2018 free(second_bdev_io); 2019 } 2020 2021 static void 2022 test_attach_ctrlr(void) 2023 { 2024 struct spdk_nvme_transport_id trid = {}; 2025 struct spdk_nvme_ctrlr *ctrlr; 2026 struct nvme_ctrlr *nvme_ctrlr; 2027 const int STRING_SIZE = 32; 2028 const char *attached_names[STRING_SIZE]; 2029 struct nvme_bdev *nbdev; 2030 int rc; 2031 2032 set_thread(0); 2033 2034 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2035 ut_init_trid(&trid); 2036 2037 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2038 * by probe polling. 2039 */ 2040 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2041 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2042 2043 ctrlr->is_failed = true; 2044 g_ut_attach_ctrlr_status = -EIO; 2045 g_ut_attach_bdev_count = 0; 2046 2047 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2048 attach_ctrlr_done, NULL, NULL, NULL, false); 2049 CU_ASSERT(rc == 0); 2050 2051 spdk_delay_us(1000); 2052 poll_threads(); 2053 2054 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2055 2056 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2057 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2058 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2059 2060 g_ut_attach_ctrlr_status = 0; 2061 2062 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2063 attach_ctrlr_done, NULL, NULL, NULL, false); 2064 CU_ASSERT(rc == 0); 2065 2066 spdk_delay_us(1000); 2067 poll_threads(); 2068 2069 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2070 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2071 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2072 2073 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2074 CU_ASSERT(rc == 0); 2075 2076 poll_threads(); 2077 spdk_delay_us(1000); 2078 poll_threads(); 2079 2080 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2081 2082 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2083 * one nvme_bdev is created. 
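 * The bdev name follows the "<ctrlr name>n<nsid>" convention, hence the
 * "nvme0n1" string checked against attached_names[0] below.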
2084 */ 2085 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2086 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2087 2088 g_ut_attach_bdev_count = 1;
2089 2090 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2091 attach_ctrlr_done, NULL, NULL, NULL, false);
2092 CU_ASSERT(rc == 0); 2093 2094 spdk_delay_us(1000); 2095 poll_threads(); 2096 2097 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2098 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2099 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2100
2101 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2102 attached_names[0] = NULL; 2103
2104 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2105 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2106 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);
2107 2108 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2109 CU_ASSERT(rc == 0); 2110 2111 poll_threads(); 2112 spdk_delay_us(1000);
2113 poll_threads(); 2114 2115 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2116
2117 /* The ctrlr has one namespace, but one nvme_ctrlr with no namespace is 2118 * created because creating the nvme_bdev fails. 2119 */
2120 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2121 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2122 2123 g_ut_register_bdev_status = -EINVAL;
2124 g_ut_attach_bdev_count = 0; 2125 2126 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2127 attach_ctrlr_done, NULL, NULL, NULL, false);
2128 CU_ASSERT(rc == 0); 2129 2130 spdk_delay_us(1000); 2131 poll_threads(); 2132 2133 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2134 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2135 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2136 2137 CU_ASSERT(attached_names[0] == NULL); 2138
2139 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2140 CU_ASSERT(rc == 0); 2141 2142 poll_threads(); 2143 spdk_delay_us(1000);
2144 poll_threads(); 2145 2146 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2147 2148 g_ut_register_bdev_status = 0; 2149 } 2150
2151 static void 2152 test_aer_cb(void) 2153 { 2154 struct spdk_nvme_transport_id trid = {}; 2155 struct spdk_nvme_ctrlr *ctrlr;
2156 struct nvme_ctrlr *nvme_ctrlr; 2157 struct nvme_bdev *bdev; 2158 const int STRING_SIZE = 32; 2159 const char *attached_names[STRING_SIZE];
2160 union spdk_nvme_async_event_completion event = {}; 2161 struct spdk_nvme_cpl cpl = {}; 2162 int rc; 2163 2164 set_thread(0); 2165
2166 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2167 ut_init_trid(&trid); 2168
2169 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2170 * namespaces are populated.
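 * (ut_attach_ctrlr(&trid, 4, true, false) creates the four namespaces with ANA
 * support; deactivating ns[0] below leaves three active namespaces, which is
 * why g_ut_attach_bdev_count is set to 3.)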
2171 */ 2172 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2173 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2174 2175 ctrlr->ns[0].is_active = false; 2176 2177 g_ut_attach_ctrlr_status = 0; 2178 g_ut_attach_bdev_count = 3; 2179 2180 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2181 attach_ctrlr_done, NULL, NULL, NULL, false); 2182 CU_ASSERT(rc == 0); 2183 2184 spdk_delay_us(1000); 2185 poll_threads(); 2186 2187 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2188 poll_threads(); 2189 2190 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2191 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2192 2193 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2194 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2195 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2196 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2197 2198 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2199 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2200 CU_ASSERT(bdev->disk.blockcnt == 1024); 2201 2202 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2203 * change the size of the 4th namespace. 2204 */ 2205 ctrlr->ns[0].is_active = true; 2206 ctrlr->ns[2].is_active = false; 2207 ctrlr->nsdata[3].nsze = 2048; 2208 2209 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2210 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2211 cpl.cdw0 = event.raw; 2212 2213 aer_cb(nvme_ctrlr, &cpl); 2214 2215 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2216 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2217 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2218 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2219 CU_ASSERT(bdev->disk.blockcnt == 2048); 2220 2221 /* Change ANA state of active namespaces. */ 2222 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2223 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2224 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2225 2226 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2227 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2228 cpl.cdw0 = event.raw; 2229 2230 aer_cb(nvme_ctrlr, &cpl); 2231 2232 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2233 poll_threads(); 2234 2235 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2236 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2237 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2238 2239 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2240 CU_ASSERT(rc == 0); 2241 2242 poll_threads(); 2243 spdk_delay_us(1000); 2244 poll_threads(); 2245 2246 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2247 } 2248 2249 static void 2250 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2251 enum spdk_bdev_io_type io_type) 2252 { 2253 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2254 struct nvme_io_path *io_path; 2255 struct spdk_nvme_qpair *qpair; 2256 2257 io_path = bdev_nvme_find_io_path(nbdev_ch); 2258 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2259 qpair = io_path->qpair->qpair; 2260 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2261 2262 bdev_io->type = io_type; 2263 bdev_io->internal.in_submit_request = true; 2264 2265 bdev_nvme_submit_request(ch, bdev_io); 2266 2267 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2268 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2269 2270 poll_threads(); 2271 2272 
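/* Once poll_threads() has run the completion, submission must be finished and
 * the qpair drained.
 */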
CU_ASSERT(bdev_io->internal.in_submit_request == false); 2273 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2274 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2275 } 2276 2277 static void 2278 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2279 enum spdk_bdev_io_type io_type) 2280 { 2281 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2282 struct nvme_io_path *io_path; 2283 struct spdk_nvme_qpair *qpair; 2284 2285 io_path = bdev_nvme_find_io_path(nbdev_ch); 2286 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2287 qpair = io_path->qpair->qpair; 2288 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2289 2290 bdev_io->type = io_type; 2291 bdev_io->internal.in_submit_request = true; 2292 2293 bdev_nvme_submit_request(ch, bdev_io); 2294 2295 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2296 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2297 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2298 } 2299 2300 static void 2301 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2302 { 2303 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2304 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2305 struct ut_nvme_req *req; 2306 struct nvme_io_path *io_path; 2307 struct spdk_nvme_qpair *qpair; 2308 2309 io_path = bdev_nvme_find_io_path(nbdev_ch); 2310 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2311 qpair = io_path->qpair->qpair; 2312 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2313 2314 /* Only compare and write now. */ 2315 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2316 bdev_io->internal.in_submit_request = true; 2317 2318 bdev_nvme_submit_request(ch, bdev_io); 2319 2320 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2321 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2322 CU_ASSERT(bio->first_fused_submitted == true); 2323 2324 /* First outstanding request is compare operation. 
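 * (For a fused compare-and-write, the compare command is submitted ahead of the
 * write; the test echoes SPDK_NVME_OPC_COMPARE in cdw0 below so that completion
 * handling can tell the two halves apart.)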
*/ 2325 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2326 SPDK_CU_ASSERT_FATAL(req != NULL); 2327 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2328 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2329 2330 poll_threads(); 2331 2332 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2333 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2334 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2335 } 2336 2337 static void 2338 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2339 struct spdk_nvme_ctrlr *ctrlr) 2340 { 2341 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2342 bdev_io->internal.in_submit_request = true; 2343 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2344 2345 bdev_nvme_submit_request(ch, bdev_io); 2346 2347 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2348 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2349 2350 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2351 poll_thread_times(1, 1); 2352 2353 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2354 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2355 2356 poll_thread_times(0, 1); 2357 2358 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2359 } 2360 2361 static void 2362 test_submit_nvme_cmd(void) 2363 { 2364 struct spdk_nvme_transport_id trid = {}; 2365 struct spdk_nvme_ctrlr *ctrlr; 2366 struct nvme_ctrlr *nvme_ctrlr; 2367 const int STRING_SIZE = 32; 2368 const char *attached_names[STRING_SIZE]; 2369 struct nvme_bdev *bdev; 2370 struct spdk_bdev_io *bdev_io; 2371 struct spdk_io_channel *ch; 2372 int rc; 2373 2374 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2375 ut_init_trid(&trid); 2376 2377 set_thread(1); 2378 2379 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2380 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2381 2382 g_ut_attach_ctrlr_status = 0; 2383 g_ut_attach_bdev_count = 1; 2384 2385 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2386 attach_ctrlr_done, NULL, NULL, NULL, false); 2387 CU_ASSERT(rc == 0); 2388 2389 spdk_delay_us(1000); 2390 poll_threads(); 2391 2392 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2393 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2394 2395 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2396 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2397 2398 set_thread(0); 2399 2400 ch = spdk_get_io_channel(bdev); 2401 SPDK_CU_ASSERT_FATAL(ch != NULL); 2402 2403 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2404 2405 bdev_io->u.bdev.iovs = NULL; 2406 2407 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2408 2409 ut_bdev_io_set_buf(bdev_io); 2410 2411 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2412 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2413 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2414 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2415 2416 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2417 2418 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2419 2420 /* Verify that ext NVME API is called when data is described by memory domain */ 2421 g_ut_read_ext_called = false; 2422 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2423 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2424 CU_ASSERT(g_ut_read_ext_called == true); 2425 g_ut_read_ext_called = false; 2426 bdev_io->u.bdev.memory_domain = NULL; 2427 2428 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2429 2430 free(bdev_io); 2431 2432 spdk_put_io_channel(ch); 2433 2434 poll_threads(); 2435 2436 
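/* Tear down from thread 1, where the ctrlr was created. */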
set_thread(1); 2437 2438 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2439 CU_ASSERT(rc == 0); 2440 2441 poll_threads(); 2442 spdk_delay_us(1000); 2443 poll_threads(); 2444 2445 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2446 } 2447 2448 static void 2449 test_add_remove_trid(void) 2450 { 2451 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2452 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2453 struct nvme_ctrlr *nvme_ctrlr = NULL; 2454 const int STRING_SIZE = 32; 2455 const char *attached_names[STRING_SIZE]; 2456 struct nvme_path_id *ctrid; 2457 int rc; 2458 2459 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2460 ut_init_trid(&path1.trid); 2461 ut_init_trid2(&path2.trid); 2462 ut_init_trid3(&path3.trid); 2463 2464 set_thread(0); 2465 2466 g_ut_attach_ctrlr_status = 0; 2467 g_ut_attach_bdev_count = 0; 2468 2469 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2470 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2471 2472 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2473 attach_ctrlr_done, NULL, NULL, NULL, false); 2474 CU_ASSERT(rc == 0); 2475 2476 spdk_delay_us(1000); 2477 poll_threads(); 2478 2479 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2480 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2481 2482 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2483 2484 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2485 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2486 2487 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2488 attach_ctrlr_done, NULL, NULL, NULL, false); 2489 CU_ASSERT(rc == 0); 2490 2491 spdk_delay_us(1000); 2492 poll_threads(); 2493 2494 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2495 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2496 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2497 break; 2498 } 2499 } 2500 CU_ASSERT(ctrid != NULL); 2501 2502 /* trid3 is not in the registered list. */ 2503 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2504 CU_ASSERT(rc == -ENXIO); 2505 2506 /* trid2 is not used, and simply removed. */ 2507 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2508 CU_ASSERT(rc == 0); 2509 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2510 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2511 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2512 } 2513 2514 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2515 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2516 2517 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2518 attach_ctrlr_done, NULL, NULL, NULL, false); 2519 CU_ASSERT(rc == 0); 2520 2521 spdk_delay_us(1000); 2522 poll_threads(); 2523 2524 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2525 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2526 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2527 break; 2528 } 2529 } 2530 CU_ASSERT(ctrid != NULL); 2531 2532 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2533 * If we add path2 again, path2 should be inserted between path1 and path3. 2534 * Then, we remove path2. It is not used, and simply removed. 
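 * (A new trid is inserted before the first trid whose last_failed_tsc is
 * non-zero, so healthy paths stay ahead of failed ones in the list.)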
2535 */ 2536 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2537 2538 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2539 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2540 2541 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2542 attach_ctrlr_done, NULL, NULL, NULL, false); 2543 CU_ASSERT(rc == 0); 2544 2545 spdk_delay_us(1000); 2546 poll_threads(); 2547 2548 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2549 2550 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2551 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2552 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2553 2554 ctrid = TAILQ_NEXT(ctrid, link); 2555 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2556 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2557 2558 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2559 CU_ASSERT(rc == 0); 2560 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2561 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2562 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2563 } 2564 2565 /* path1 is currently used and path3 is an alternative path. 2566 * If we remove path1, path is changed to path3. 2567 */ 2568 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2569 CU_ASSERT(rc == 0); 2570 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2571 CU_ASSERT(nvme_ctrlr->resetting == true); 2572 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2573 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2574 } 2575 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2576 2577 poll_threads(); 2578 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2579 poll_threads(); 2580 2581 CU_ASSERT(nvme_ctrlr->resetting == false); 2582 2583 /* path3 is the current and only path. If we remove path3, the corresponding 2584 * nvme_ctrlr is removed. 2585 */ 2586 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2587 CU_ASSERT(rc == 0); 2588 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2589 2590 poll_threads(); 2591 spdk_delay_us(1000); 2592 poll_threads(); 2593 2594 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2595 2596 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2597 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2598 2599 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2600 attach_ctrlr_done, NULL, NULL, NULL, false); 2601 CU_ASSERT(rc == 0); 2602 2603 spdk_delay_us(1000); 2604 poll_threads(); 2605 2606 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2607 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2608 2609 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2610 2611 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2612 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2613 2614 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2615 attach_ctrlr_done, NULL, NULL, NULL, false); 2616 CU_ASSERT(rc == 0); 2617 2618 spdk_delay_us(1000); 2619 poll_threads(); 2620 2621 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2622 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2623 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2624 break; 2625 } 2626 } 2627 CU_ASSERT(ctrid != NULL); 2628 2629 /* If trid is not specified, nvme_ctrlr itself is removed. 
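 * (g_any_path is an empty nvme_path_id that acts as a wildcard and matches any
 * registered path.)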
*/ 2630 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2631 CU_ASSERT(rc == 0); 2632 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2633 2634 poll_threads(); 2635 spdk_delay_us(1000); 2636 poll_threads(); 2637 2638 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2639 } 2640
2641 static void 2642 test_abort(void) 2643 { 2644 struct spdk_nvme_transport_id trid = {}; 2645 struct nvme_ctrlr_opts opts = {};
2646 struct spdk_nvme_ctrlr *ctrlr; 2647 struct nvme_ctrlr *nvme_ctrlr; 2648 const int STRING_SIZE = 32; 2649 const char *attached_names[STRING_SIZE];
2650 struct nvme_bdev *bdev; 2651 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2652 struct spdk_io_channel *ch1, *ch2;
2653 struct nvme_bdev_channel *nbdev_ch1; 2654 struct nvme_io_path *io_path1; 2655 struct nvme_qpair *nvme_qpair1; 2656 int rc; 2657
2658 /* Create the ctrlr on thread 1, and submit the I/O and admin requests to be aborted 2659 * on thread 0. Abort requests for the I/O commands are submitted on thread 0, and
2660 * abort requests for the admin command are submitted on thread 1. Both should succeed. 2661 */ 2662
2663 ut_init_trid(&trid); 2664 2665 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2666 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2667
2668 g_ut_attach_ctrlr_status = 0; 2669 g_ut_attach_bdev_count = 1; 2670 2671 set_thread(1); 2672 2673 opts.ctrlr_loss_timeout_sec = -1;
2674 opts.reconnect_delay_sec = 1; 2675 2676 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2677 attach_ctrlr_done, NULL, NULL, &opts, false);
2678 CU_ASSERT(rc == 0); 2679 2680 spdk_delay_us(1000); 2681 poll_threads(); 2682 2683 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2684 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2685 2686 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2687 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2688
2689 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2690 ut_bdev_io_set_buf(write_io); 2691
2692 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2693 ut_bdev_io_set_buf(fuse_io); 2694
2695 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2696 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2697
2698 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2699 2700 set_thread(0); 2701 2702 ch1 = spdk_get_io_channel(bdev);
2703 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2704 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2705 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
2706 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2707 nvme_qpair1 = io_path1->qpair; 2708 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2709
2710 set_thread(1); 2711 2712 ch2 = spdk_get_io_channel(bdev); 2713 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2714
2715 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2716 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2717 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2718 2719 /* Aborting the already completed request should fail.
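 * (The write below is completed by poll_threads() before the abort is
 * submitted, so the abort finds no outstanding request to act on.)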
*/ 2720 write_io->internal.in_submit_request = true; 2721 bdev_nvme_submit_request(ch1, write_io); 2722 poll_threads(); 2723 2724 CU_ASSERT(write_io->internal.in_submit_request == false); 2725 2726 abort_io->u.abort.bio_to_abort = write_io; 2727 abort_io->internal.in_submit_request = true; 2728 2729 bdev_nvme_submit_request(ch1, abort_io); 2730 2731 poll_threads(); 2732 2733 CU_ASSERT(abort_io->internal.in_submit_request == false); 2734 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2735 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2736 2737 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2738 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2739 2740 admin_io->internal.in_submit_request = true; 2741 bdev_nvme_submit_request(ch1, admin_io); 2742 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2743 poll_threads(); 2744 2745 CU_ASSERT(admin_io->internal.in_submit_request == false); 2746 2747 abort_io->u.abort.bio_to_abort = admin_io; 2748 abort_io->internal.in_submit_request = true; 2749 2750 bdev_nvme_submit_request(ch2, abort_io); 2751 2752 poll_threads(); 2753 2754 CU_ASSERT(abort_io->internal.in_submit_request == false); 2755 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2756 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2757 2758 /* Aborting the write request should succeed. */ 2759 write_io->internal.in_submit_request = true; 2760 bdev_nvme_submit_request(ch1, write_io); 2761 2762 CU_ASSERT(write_io->internal.in_submit_request == true); 2763 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2764 2765 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2766 abort_io->u.abort.bio_to_abort = write_io; 2767 abort_io->internal.in_submit_request = true; 2768 2769 bdev_nvme_submit_request(ch1, abort_io); 2770 2771 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2772 poll_threads(); 2773 2774 CU_ASSERT(abort_io->internal.in_submit_request == false); 2775 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2776 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2777 CU_ASSERT(write_io->internal.in_submit_request == false); 2778 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2779 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2780 2781 /* Aborting the fuse request should succeed. */ 2782 fuse_io->internal.in_submit_request = true; 2783 bdev_nvme_submit_request(ch1, fuse_io); 2784 2785 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2786 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2787 2788 abort_io->u.abort.bio_to_abort = fuse_io; 2789 abort_io->internal.in_submit_request = true; 2790 2791 bdev_nvme_submit_request(ch1, abort_io); 2792 2793 spdk_delay_us(10000); 2794 poll_threads(); 2795 2796 CU_ASSERT(abort_io->internal.in_submit_request == false); 2797 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2798 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2799 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2800 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2801 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2802 2803 /* Aborting the admin request should succeed. 
*/ 2804 admin_io->internal.in_submit_request = true; 2805 bdev_nvme_submit_request(ch1, admin_io); 2806 2807 CU_ASSERT(admin_io->internal.in_submit_request == true); 2808 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2809 2810 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2811 abort_io->u.abort.bio_to_abort = admin_io; 2812 abort_io->internal.in_submit_request = true; 2813 2814 bdev_nvme_submit_request(ch2, abort_io); 2815 2816 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2817 poll_threads(); 2818 2819 CU_ASSERT(abort_io->internal.in_submit_request == false); 2820 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2821 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2822 CU_ASSERT(admin_io->internal.in_submit_request == false); 2823 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2824 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2825 2826 set_thread(0); 2827 2828 /* If qpair is disconnected, it is freed and then reconnected via resetting 2829 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2830 * while resetting the nvme_ctrlr. 2831 */ 2832 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2833 2834 poll_thread_times(0, 3); 2835 2836 CU_ASSERT(nvme_qpair1->qpair == NULL); 2837 CU_ASSERT(nvme_ctrlr->resetting == true); 2838 2839 write_io->internal.in_submit_request = true; 2840 2841 bdev_nvme_submit_request(ch1, write_io); 2842 2843 CU_ASSERT(write_io->internal.in_submit_request == true); 2844 CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list))); 2845 2846 /* Aborting the queued write request should succeed immediately. */ 2847 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2848 abort_io->u.abort.bio_to_abort = write_io; 2849 abort_io->internal.in_submit_request = true; 2850 2851 bdev_nvme_submit_request(ch1, abort_io); 2852 2853 CU_ASSERT(abort_io->internal.in_submit_request == false); 2854 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2855 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2856 CU_ASSERT(write_io->internal.in_submit_request == false); 2857 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2858 2859 poll_threads(); 2860 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2861 poll_threads(); 2862 2863 spdk_put_io_channel(ch1); 2864 2865 set_thread(1); 2866 2867 spdk_put_io_channel(ch2); 2868 2869 poll_threads(); 2870 2871 free(write_io); 2872 free(fuse_io); 2873 free(admin_io); 2874 free(abort_io); 2875 2876 set_thread(1); 2877 2878 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2879 CU_ASSERT(rc == 0); 2880 2881 poll_threads(); 2882 spdk_delay_us(1000); 2883 poll_threads(); 2884 2885 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2886 } 2887 2888 static void 2889 test_get_io_qpair(void) 2890 { 2891 struct spdk_nvme_transport_id trid = {}; 2892 struct spdk_nvme_ctrlr ctrlr = {}; 2893 struct nvme_ctrlr *nvme_ctrlr = NULL; 2894 struct spdk_io_channel *ch; 2895 struct nvme_ctrlr_channel *ctrlr_ch; 2896 struct spdk_nvme_qpair *qpair; 2897 int rc; 2898 2899 ut_init_trid(&trid); 2900 TAILQ_INIT(&ctrlr.active_io_qpairs); 2901 2902 set_thread(0); 2903 2904 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2905 CU_ASSERT(rc == 0); 2906 2907 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2908 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2909 2910 ch = spdk_get_io_channel(nvme_ctrlr); 2911 SPDK_CU_ASSERT_FATAL(ch != NULL); 2912 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2913 
CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2914 2915 qpair = bdev_nvme_get_io_qpair(ch); 2916 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2917
2918 spdk_put_io_channel(ch); 2919 2920 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2921 CU_ASSERT(rc == 0); 2922
2923 poll_threads(); 2924 spdk_delay_us(1000); 2925 poll_threads(); 2926 2927 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2928 } 2929
2930 /* Test the scenario in which the bdev subsystem starts shutting down while NVMe 2931 * bdevs still exist. In this scenario, spdk_bdev_unregister() is called first. This
2932 * test case guards against regressions in that path. spdk_bdev_unregister() calls 2933 * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here. 2934 */
2935 static void 2936 test_bdev_unregister(void) 2937 { 2938 struct spdk_nvme_transport_id trid = {}; 2939 struct spdk_nvme_ctrlr *ctrlr;
2940 struct nvme_ctrlr *nvme_ctrlr; 2941 struct nvme_ns *nvme_ns1, *nvme_ns2; 2942 const int STRING_SIZE = 32; 2943 const char *attached_names[STRING_SIZE];
2944 struct nvme_bdev *bdev1, *bdev2; 2945 int rc; 2946 2947 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2948 ut_init_trid(&trid); 2949
2950 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2951 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2952 2953 g_ut_attach_ctrlr_status = 0;
2954 g_ut_attach_bdev_count = 2; 2955 2956 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2957 attach_ctrlr_done, NULL, NULL, NULL, false);
2958 CU_ASSERT(rc == 0); 2959 2960 spdk_delay_us(1000); 2961 poll_threads(); 2962 2963 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2964 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2965 2966 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2967 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2968
2969 bdev1 = nvme_ns1->bdev; 2970 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2971 2972 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
2973 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2974 2975 bdev2 = nvme_ns2->bdev; 2976 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2977
2978 bdev_nvme_destruct(&bdev1->disk); 2979 bdev_nvme_destruct(&bdev2->disk); 2980 2981 poll_threads(); 2982 2983 CU_ASSERT(nvme_ns1->bdev == NULL);
2984 CU_ASSERT(nvme_ns2->bdev == NULL); 2985 2986 nvme_ctrlr->destruct = true; 2987 _nvme_ctrlr_destruct(nvme_ctrlr); 2988 2989 poll_threads();
2990 spdk_delay_us(1000); 2991 poll_threads(); 2992 2993 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2994 } 2995
2996 static void 2997 test_compare_ns(void) 2998 { 2999 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
3000 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
3001 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
3002 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3003 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3004
3005 /* No IDs are defined. */ 3006 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3007
3008 /* Only EUI64 are defined and not matched. */ 3009 nsdata1.eui64 = 0xABCDEF0123456789; 3010 nsdata2.eui64 = 0xBBCDEF0123456789;
3011 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3012 3013 /* Only EUI64 are defined and matched. */ 3014 nsdata2.eui64 = 0xABCDEF0123456789;
3015 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3016 3017 /* Only NGUID are defined and not matched.
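 * (bdev_nvme_compare_ns() matches two namespaces by their identifiers, i.e.
 * EUI64, NGUID, and UUID, as exercised case by case below.)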
*/ 3018 nsdata1.eui64 = 0x0; 3019 nsdata2.eui64 = 0x0; 3020 nsdata1.nguid[0] = 0x12; 3021 nsdata2.nguid[0] = 0x10; 3022 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3023 3024 /* Only NGUID are defined and matched. */ 3025 nsdata2.nguid[0] = 0x12; 3026 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3027 3028 /* Only UUID are defined and not matched. */ 3029 nsdata1.nguid[0] = 0x0; 3030 nsdata2.nguid[0] = 0x0; 3031 ns1.uuid = &uuid1; 3032 ns2.uuid = &uuid2; 3033 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3034 3035 /* Only one UUID is defined. */ 3036 ns1.uuid = NULL; 3037 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3038 3039 /* Only UUID are defined and matched. */ 3040 ns1.uuid = &uuid2; 3041 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3042 3043 /* All EUI64, NGUID, and UUID are defined and matched. */ 3044 nsdata1.eui64 = 0x123456789ABCDEF; 3045 nsdata2.eui64 = 0x123456789ABCDEF; 3046 nsdata1.nguid[15] = 0x34; 3047 nsdata2.nguid[15] = 0x34; 3048 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3049 3050 /* CSI are not matched. */ 3051 ns1.csi = SPDK_NVME_CSI_ZNS; 3052 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3053 } 3054 3055 static void 3056 test_init_ana_log_page(void) 3057 { 3058 struct spdk_nvme_transport_id trid = {}; 3059 struct spdk_nvme_ctrlr *ctrlr; 3060 struct nvme_ctrlr *nvme_ctrlr; 3061 const int STRING_SIZE = 32; 3062 const char *attached_names[STRING_SIZE]; 3063 int rc; 3064 3065 set_thread(0); 3066 3067 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3068 ut_init_trid(&trid); 3069 3070 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3071 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3072 3073 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3074 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3075 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3076 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3077 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3078 3079 g_ut_attach_ctrlr_status = 0; 3080 g_ut_attach_bdev_count = 5; 3081 3082 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3083 attach_ctrlr_done, NULL, NULL, NULL, false); 3084 CU_ASSERT(rc == 0); 3085 3086 spdk_delay_us(1000); 3087 poll_threads(); 3088 3089 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3090 poll_threads(); 3091 3092 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3093 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3094 3095 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3096 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3097 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3098 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3099 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3100 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3101 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3102 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3103 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3104 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3105 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3106 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3107 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3108 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3109 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != 
NULL); 3110 3111 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3112 CU_ASSERT(rc == 0); 3113 3114 poll_threads(); 3115 spdk_delay_us(1000); 3116 poll_threads(); 3117 3118 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3119 } 3120 3121 static void 3122 init_accel(void) 3123 { 3124 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3125 sizeof(int), "accel_p"); 3126 } 3127 3128 static void 3129 fini_accel(void) 3130 { 3131 spdk_io_device_unregister(g_accel_p, NULL); 3132 } 3133 3134 static void 3135 test_get_memory_domains(void) 3136 { 3137 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3138 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3139 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3140 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3141 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3142 struct spdk_memory_domain *domains[4] = {}; 3143 int rc = 0; 3144 3145 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3146 3147 /* nvme controller doesn't have memory domains */ 3148 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3149 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3150 CU_ASSERT(rc == 0); 3151 CU_ASSERT(domains[0] == NULL); 3152 CU_ASSERT(domains[1] == NULL); 3153 3154 /* nvme controller has a memory domain */ 3155 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3156 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3157 CU_ASSERT(rc == 1); 3158 CU_ASSERT(domains[0] != NULL); 3159 memset(domains, 0, sizeof(domains)); 3160 3161 /* multipath, 2 controllers report 1 memory domain each */ 3162 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3163 3164 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3165 CU_ASSERT(rc == 2); 3166 CU_ASSERT(domains[0] != NULL); 3167 CU_ASSERT(domains[1] != NULL); 3168 memset(domains, 0, sizeof(domains)); 3169 3170 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3171 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3172 CU_ASSERT(rc == 2); 3173 3174 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3175 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3176 CU_ASSERT(rc == 2); 3177 CU_ASSERT(domains[0] == NULL); 3178 CU_ASSERT(domains[1] == NULL); 3179 3180 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3181 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3182 CU_ASSERT(rc == 2); 3183 CU_ASSERT(domains[0] != NULL); 3184 CU_ASSERT(domains[1] == NULL); 3185 memset(domains, 0, sizeof(domains)); 3186 3187 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3188 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3189 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3190 CU_ASSERT(rc == 4); 3191 CU_ASSERT(domains[0] != NULL); 3192 CU_ASSERT(domains[1] != NULL); 3193 CU_ASSERT(domains[2] != NULL); 3194 CU_ASSERT(domains[3] != NULL); 3195 memset(domains, 0, sizeof(domains)); 3196 3197 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3198 * Array size is less than the number of memory domains */ 3199 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3200 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3201 CU_ASSERT(rc == 4); 3202 CU_ASSERT(domains[0] != NULL); 3203 CU_ASSERT(domains[1] != NULL); 3204 CU_ASSERT(domains[2] != NULL); 3205 CU_ASSERT(domains[3] == NULL); 3206 memset(domains, 0, 
sizeof(domains)); 3207 3208 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3209 } 3210 3211 static void 3212 test_reconnect_qpair(void) 3213 { 3214 struct spdk_nvme_transport_id trid = {}; 3215 struct spdk_nvme_ctrlr *ctrlr; 3216 struct nvme_ctrlr *nvme_ctrlr; 3217 const int STRING_SIZE = 32; 3218 const char *attached_names[STRING_SIZE]; 3219 struct nvme_bdev *bdev; 3220 struct spdk_io_channel *ch1, *ch2; 3221 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3222 struct nvme_io_path *io_path1, *io_path2; 3223 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3224 int rc; 3225 3226 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3227 ut_init_trid(&trid); 3228 3229 set_thread(0); 3230 3231 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3232 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3233 3234 g_ut_attach_ctrlr_status = 0; 3235 g_ut_attach_bdev_count = 1; 3236 3237 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3238 attach_ctrlr_done, NULL, NULL, NULL, false); 3239 CU_ASSERT(rc == 0); 3240 3241 spdk_delay_us(1000); 3242 poll_threads(); 3243 3244 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3245 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3246 3247 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3248 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3249 3250 ch1 = spdk_get_io_channel(bdev); 3251 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3252 3253 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3254 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3255 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3256 nvme_qpair1 = io_path1->qpair; 3257 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3258 3259 set_thread(1); 3260 3261 ch2 = spdk_get_io_channel(bdev); 3262 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3263 3264 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3265 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3266 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3267 nvme_qpair2 = io_path2->qpair; 3268 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3269 3270 /* If a qpair is disconnected, it is freed and then reconnected via 3271 * resetting the corresponding nvme_ctrlr. 3272 */ 3273 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3274 ctrlr->is_failed = true; 3275 3276 poll_thread_times(1, 3); 3277 CU_ASSERT(nvme_qpair1->qpair != NULL); 3278 CU_ASSERT(nvme_qpair2->qpair == NULL); 3279 CU_ASSERT(nvme_ctrlr->resetting == true); 3280 3281 poll_thread_times(0, 3); 3282 CU_ASSERT(nvme_qpair1->qpair == NULL); 3283 CU_ASSERT(nvme_qpair2->qpair == NULL); 3284 CU_ASSERT(ctrlr->is_failed == true); 3285 3286 poll_thread_times(1, 2); 3287 poll_thread_times(0, 1); 3288 CU_ASSERT(ctrlr->is_failed == false); 3289 CU_ASSERT(ctrlr->adminq.is_connected == false); 3290 3291 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3292 poll_thread_times(0, 2); 3293 CU_ASSERT(ctrlr->adminq.is_connected == true); 3294 3295 poll_thread_times(0, 1); 3296 poll_thread_times(1, 1); 3297 CU_ASSERT(nvme_qpair1->qpair != NULL); 3298 CU_ASSERT(nvme_qpair2->qpair != NULL); 3299 CU_ASSERT(nvme_ctrlr->resetting == true); 3300 3301 poll_thread_times(0, 2); 3302 poll_thread_times(1, 1); 3303 poll_thread_times(0, 1); 3304 CU_ASSERT(nvme_ctrlr->resetting == false); 3305 3306 poll_threads(); 3307 3308 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3309 * fails, the qpair is just freed. 
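 * (fail_reset keeps the reconnect from succeeding, so both qpairs are expected
 * to remain NULL once the reset sequence finishes.)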
3310 */ 3311 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3312 ctrlr->is_failed = true; 3313 ctrlr->fail_reset = true; 3314 3315 poll_thread_times(1, 3); 3316 CU_ASSERT(nvme_qpair1->qpair != NULL); 3317 CU_ASSERT(nvme_qpair2->qpair == NULL); 3318 CU_ASSERT(nvme_ctrlr->resetting == true); 3319 3320 poll_thread_times(0, 3); 3321 poll_thread_times(1, 1); 3322 CU_ASSERT(nvme_qpair1->qpair == NULL); 3323 CU_ASSERT(nvme_qpair2->qpair == NULL); 3324 CU_ASSERT(ctrlr->is_failed == true); 3325 3326 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3327 poll_thread_times(0, 3); 3328 poll_thread_times(1, 1); 3329 poll_thread_times(0, 1); 3330 CU_ASSERT(ctrlr->is_failed == true); 3331 CU_ASSERT(nvme_ctrlr->resetting == false); 3332 CU_ASSERT(nvme_qpair1->qpair == NULL); 3333 CU_ASSERT(nvme_qpair2->qpair == NULL); 3334 3335 poll_threads(); 3336 3337 spdk_put_io_channel(ch2); 3338 3339 set_thread(0); 3340 3341 spdk_put_io_channel(ch1); 3342 3343 poll_threads(); 3344 3345 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3346 CU_ASSERT(rc == 0); 3347 3348 poll_threads(); 3349 spdk_delay_us(1000); 3350 poll_threads(); 3351 3352 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3353 } 3354 3355 static void 3356 test_create_bdev_ctrlr(void) 3357 { 3358 struct nvme_path_id path1 = {}, path2 = {}; 3359 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3360 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3361 const int STRING_SIZE = 32; 3362 const char *attached_names[STRING_SIZE]; 3363 int rc; 3364 3365 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3366 ut_init_trid(&path1.trid); 3367 ut_init_trid2(&path2.trid); 3368 3369 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3370 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3371 3372 g_ut_attach_ctrlr_status = 0; 3373 g_ut_attach_bdev_count = 0; 3374 3375 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3376 attach_ctrlr_done, NULL, NULL, NULL, true); 3377 CU_ASSERT(rc == 0); 3378 3379 spdk_delay_us(1000); 3380 poll_threads(); 3381 3382 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3383 poll_threads(); 3384 3385 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3386 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3387 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3388 3389 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3390 g_ut_attach_ctrlr_status = -EINVAL; 3391 3392 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3393 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3394 3395 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3396 3397 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3398 attach_ctrlr_done, NULL, NULL, NULL, true); 3399 CU_ASSERT(rc == 0); 3400 3401 spdk_delay_us(1000); 3402 poll_threads(); 3403 3404 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3405 poll_threads(); 3406 3407 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3408 3409 /* cntlid is not duplicated, and adding the third ctrlr should succeed. 
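 * (Unlike the previous case, cdata.cntlid is not overridden here, so the newly
 * attached ctrlr keeps its own unique controller ID.)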
*/ 3410 g_ut_attach_ctrlr_status = 0; 3411 3412 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3413 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3414 3415 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3416 attach_ctrlr_done, NULL, NULL, NULL, true); 3417 CU_ASSERT(rc == 0); 3418 3419 spdk_delay_us(1000); 3420 poll_threads(); 3421 3422 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3423 poll_threads(); 3424 3425 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3426 3427 /* Delete two ctrlrs at once. */ 3428 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3429 CU_ASSERT(rc == 0); 3430 3431 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3432 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3433 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3434 3435 poll_threads(); 3436 spdk_delay_us(1000); 3437 poll_threads(); 3438 3439 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3440 3441 /* Add two ctrlrs and delete one by one. */ 3442 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3443 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3444 3445 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3446 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3447 3448 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3449 attach_ctrlr_done, NULL, NULL, NULL, true); 3450 CU_ASSERT(rc == 0); 3451 3452 spdk_delay_us(1000); 3453 poll_threads(); 3454 3455 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3456 poll_threads(); 3457 3458 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3459 attach_ctrlr_done, NULL, NULL, NULL, true); 3460 CU_ASSERT(rc == 0); 3461 3462 spdk_delay_us(1000); 3463 poll_threads(); 3464 3465 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3466 poll_threads(); 3467 3468 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3469 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3470 3471 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3472 CU_ASSERT(rc == 0); 3473 3474 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3475 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3476 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3477 3478 poll_threads(); 3479 spdk_delay_us(1000); 3480 poll_threads(); 3481 3482 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3483 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3484 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3485 3486 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3487 CU_ASSERT(rc == 0); 3488 3489 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3490 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3491 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3492 3493 poll_threads(); 3494 spdk_delay_us(1000); 3495 poll_threads(); 3496 3497 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3498 } 3499 3500 static struct nvme_ns * 3501 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3502 { 3503 struct nvme_ns *nvme_ns; 3504 3505 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3506 if (nvme_ns->ctrlr == nvme_ctrlr) { 3507 return nvme_ns; 3508 } 3509 } 3510 3511 return NULL; 3512 } 3513 3514 static void 3515 test_add_multi_ns_to_bdev(void) 3516 { 3517 struct nvme_path_id path1 = {}, path2 = {}; 3518 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3519 struct nvme_ctrlr 
*nvme_ctrlr1, *nvme_ctrlr2; 3520 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3521 struct nvme_ns *nvme_ns1, *nvme_ns2; 3522 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3523 const int STRING_SIZE = 32; 3524 const char *attached_names[STRING_SIZE]; 3525 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3526 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3527 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3528 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3529 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3530 int rc; 3531 3532 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3533 ut_init_trid(&path1.trid); 3534 ut_init_trid2(&path2.trid); 3535 3536 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3537 3538 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3539 * namespaces are populated. 3540 */ 3541 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3542 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3543 3544 ctrlr1->ns[1].is_active = false; 3545 ctrlr1->ns[4].is_active = false; 3546 ctrlr1->ns[0].uuid = &uuid1; 3547 ctrlr1->ns[2].uuid = &uuid3; 3548 ctrlr1->ns[3].uuid = &uuid4; 3549 3550 g_ut_attach_ctrlr_status = 0; 3551 g_ut_attach_bdev_count = 3; 3552 3553 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3554 attach_ctrlr_done, NULL, NULL, NULL, true); 3555 CU_ASSERT(rc == 0); 3556 3557 spdk_delay_us(1000); 3558 poll_threads(); 3559 3560 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3561 poll_threads(); 3562 3563 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3564 * namespaces are populated. The uuid of 4th namespace is different, and hence 3565 * adding 4th namespace to a bdev should fail. 3566 */ 3567 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3568 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3569 3570 ctrlr2->ns[2].is_active = false; 3571 ctrlr2->ns[4].is_active = false; 3572 ctrlr2->ns[0].uuid = &uuid1; 3573 ctrlr2->ns[1].uuid = &uuid2; 3574 ctrlr2->ns[3].uuid = &uuid44; 3575 3576 g_ut_attach_ctrlr_status = 0; 3577 g_ut_attach_bdev_count = 2; 3578 3579 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3580 attach_ctrlr_done, NULL, NULL, NULL, true); 3581 CU_ASSERT(rc == 0); 3582 3583 spdk_delay_us(1000); 3584 poll_threads(); 3585 3586 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3587 poll_threads(); 3588 3589 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3590 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3591 3592 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3593 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3594 3595 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3596 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3597 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3598 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3599 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3600 3601 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3602 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3603 3604 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3605 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3606 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3607 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3608 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3609 3610 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3611 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3612 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3613 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3614 
bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3615 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3616 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3617 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3618 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3619 3620 CU_ASSERT(bdev1->ref == 2); 3621 CU_ASSERT(bdev2->ref == 1); 3622 CU_ASSERT(bdev3->ref == 1); 3623 CU_ASSERT(bdev4->ref == 1); 3624 3625 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3626 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3627 CU_ASSERT(rc == 0); 3628 3629 poll_threads(); 3630 spdk_delay_us(1000); 3631 poll_threads(); 3632 3633 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3634 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3635 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3636 3637 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3638 CU_ASSERT(rc == 0); 3639 3640 poll_threads(); 3641 spdk_delay_us(1000); 3642 poll_threads(); 3643 3644 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3645 3646 /* Test if an nvme_bdev which has a namespace shared between two ctrlrs 3647 * can be deleted when the bdev subsystem shuts down. 3648 */ 3649 g_ut_attach_bdev_count = 1; 3650 3651 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3652 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3653 3654 ctrlr1->ns[0].uuid = &uuid1; 3655 3656 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3657 attach_ctrlr_done, NULL, NULL, NULL, true); 3658 CU_ASSERT(rc == 0); 3659 3660 spdk_delay_us(1000); 3661 poll_threads(); 3662 3663 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3664 poll_threads(); 3665 3666 ut_init_trid2(&path2.trid); 3667 3668 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3669 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3670 3671 ctrlr2->ns[0].uuid = &uuid1; 3672 3673 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3674 attach_ctrlr_done, NULL, NULL, NULL, true); 3675 CU_ASSERT(rc == 0); 3676 3677 spdk_delay_us(1000); 3678 poll_threads(); 3679 3680 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3681 poll_threads(); 3682 3683 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3684 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3685 3686 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3687 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3688 3689 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3690 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3691 3692 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3693 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3694 3695 /* Check if the nvme_bdev has two nvme_ns. */ 3696 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3697 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3698 CU_ASSERT(nvme_ns1->bdev == bdev1); 3699 3700 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3701 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3702 CU_ASSERT(nvme_ns2->bdev == bdev1); 3703 3704 /* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down.
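 * bdev_nvme_destruct() should detach bdev1 from both nvme_ns entries
 * (each nvme_ns->bdev becomes NULL) without touching the ctrlrs; the two
 * nvme_ctrlrs are then destructed explicitly afterwards.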
*/ 3705 bdev_nvme_destruct(&bdev1->disk); 3706 3707 poll_threads(); 3708 3709 CU_ASSERT(nvme_ns1->bdev == NULL); 3710 CU_ASSERT(nvme_ns2->bdev == NULL); 3711 3712 nvme_ctrlr1->destruct = true; 3713 _nvme_ctrlr_destruct(nvme_ctrlr1); 3714 3715 poll_threads(); 3716 spdk_delay_us(1000); 3717 poll_threads(); 3718 3719 nvme_ctrlr2->destruct = true; 3720 _nvme_ctrlr_destruct(nvme_ctrlr2); 3721 3722 poll_threads(); 3723 spdk_delay_us(1000); 3724 poll_threads(); 3725 3726 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3727 } 3728 3729 static void 3730 test_add_multi_io_paths_to_nbdev_ch(void) 3731 { 3732 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3733 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3734 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3735 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3736 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3737 const int STRING_SIZE = 32; 3738 const char *attached_names[STRING_SIZE]; 3739 struct nvme_bdev *bdev; 3740 struct spdk_io_channel *ch; 3741 struct nvme_bdev_channel *nbdev_ch; 3742 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3743 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3744 int rc; 3745 3746 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3747 ut_init_trid(&path1.trid); 3748 ut_init_trid2(&path2.trid); 3749 ut_init_trid3(&path3.trid); 3750 g_ut_attach_ctrlr_status = 0; 3751 g_ut_attach_bdev_count = 1; 3752 3753 set_thread(1); 3754 3755 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3756 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3757 3758 ctrlr1->ns[0].uuid = &uuid1; 3759 3760 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3761 attach_ctrlr_done, NULL, NULL, NULL, true); 3762 CU_ASSERT(rc == 0); 3763 3764 spdk_delay_us(1000); 3765 poll_threads(); 3766 3767 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3768 poll_threads(); 3769 3770 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3771 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3772 3773 ctrlr2->ns[0].uuid = &uuid1; 3774 3775 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3776 attach_ctrlr_done, NULL, NULL, NULL, true); 3777 CU_ASSERT(rc == 0); 3778 3779 spdk_delay_us(1000); 3780 poll_threads(); 3781 3782 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3783 poll_threads(); 3784 3785 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3786 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3787 3788 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3789 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3790 3791 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3792 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3793 3794 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3795 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3796 3797 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3798 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3799 3800 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3801 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3802 3803 set_thread(0); 3804 3805 ch = spdk_get_io_channel(bdev); 3806 SPDK_CU_ASSERT_FATAL(ch != NULL); 3807 nbdev_ch = spdk_io_channel_get_ctx(ch); 3808 3809 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3810 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3811 3812 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3813 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3814 3815 set_thread(1); 3816 3817 /* Check if I/O path is dynamically added to nvme_bdev_channel. 
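 * ctrlr3 exposes a namespace with the same uuid1, so while ch is still open
 * on thread 0, attaching ctrlr3 from thread 1 should extend the existing
 * nbdev_ch->io_path_list rather than require the channel to be recreated.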
*/ 3818 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3819 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3820 3821 ctrlr3->ns[0].uuid = &uuid1; 3822 3823 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3824 attach_ctrlr_done, NULL, NULL, NULL, true); 3825 CU_ASSERT(rc == 0); 3826 3827 spdk_delay_us(1000); 3828 poll_threads(); 3829 3830 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3831 poll_threads(); 3832 3833 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3834 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3835 3836 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3837 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3838 3839 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3840 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3841 3842 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3843 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3844 CU_ASSERT(rc == 0); 3845 3846 poll_threads(); 3847 spdk_delay_us(1000); 3848 poll_threads(); 3849 3850 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3851 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3852 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3853 3854 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3855 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3856 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3857 3858 set_thread(0); 3859 3860 spdk_put_io_channel(ch); 3861 3862 poll_threads(); 3863 3864 set_thread(1); 3865 3866 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3867 CU_ASSERT(rc == 0); 3868 3869 poll_threads(); 3870 spdk_delay_us(1000); 3871 poll_threads(); 3872 3873 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3874 } 3875 3876 static void 3877 test_admin_path(void) 3878 { 3879 struct nvme_path_id path1 = {}, path2 = {}; 3880 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3881 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3882 const int STRING_SIZE = 32; 3883 const char *attached_names[STRING_SIZE]; 3884 struct nvme_bdev *bdev; 3885 struct spdk_io_channel *ch; 3886 struct spdk_bdev_io *bdev_io; 3887 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3888 int rc; 3889 3890 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3891 ut_init_trid(&path1.trid); 3892 ut_init_trid2(&path2.trid); 3893 g_ut_attach_ctrlr_status = 0; 3894 g_ut_attach_bdev_count = 1; 3895 3896 set_thread(0); 3897 3898 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3899 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3900 3901 ctrlr1->ns[0].uuid = &uuid1; 3902 3903 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3904 attach_ctrlr_done, NULL, NULL, NULL, true); 3905 CU_ASSERT(rc == 0); 3906 3907 spdk_delay_us(1000); 3908 poll_threads(); 3909 3910 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3911 poll_threads(); 3912 3913 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3914 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3915 3916 ctrlr2->ns[0].uuid = &uuid1; 3917 3918 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3919 attach_ctrlr_done, NULL, NULL, NULL, true); 3920 CU_ASSERT(rc == 0); 3921 3922 spdk_delay_us(1000); 3923 poll_threads(); 3924 3925 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3926 poll_threads(); 3927 3928 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3929 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3930 3931 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3932 
SPDK_CU_ASSERT_FATAL(bdev != NULL); 3933 3934 ch = spdk_get_io_channel(bdev); 3935 SPDK_CU_ASSERT_FATAL(ch != NULL); 3936 3937 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3938 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3939 3940 /* ctrlr1 is failed but ctrlr2 is not. The admin command should be 3941 * submitted to ctrlr2. 3942 */ 3943 ctrlr1->is_failed = true; 3944 bdev_io->internal.in_submit_request = true; 3945 3946 bdev_nvme_submit_request(ch, bdev_io); 3947 3948 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3949 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3950 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3951 3952 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3953 poll_threads(); 3954 3955 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3956 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3957 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3958 3959 /* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command should fail. */ 3960 ctrlr2->is_failed = true; 3961 bdev_io->internal.in_submit_request = true; 3962 3963 bdev_nvme_submit_request(ch, bdev_io); 3964 3965 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3966 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3967 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3968 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3969 3970 free(bdev_io); 3971 3972 spdk_put_io_channel(ch); 3973 3974 poll_threads(); 3975 3976 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3977 CU_ASSERT(rc == 0); 3978 3979 poll_threads(); 3980 spdk_delay_us(1000); 3981 poll_threads(); 3982 3983 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3984 } 3985 3986 static struct nvme_io_path * 3987 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3988 struct nvme_ctrlr *nvme_ctrlr) 3989 { 3990 struct nvme_io_path *io_path; 3991 3992 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3993 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3994 return io_path; 3995 } 3996 } 3997 3998 return NULL; 3999 } 4000 4001 static void 4002 test_reset_bdev_ctrlr(void) 4003 { 4004 struct nvme_path_id path1 = {}, path2 = {}; 4005 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4006 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4007 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4008 struct nvme_path_id *curr_path1, *curr_path2; 4009 const int STRING_SIZE = 32; 4010 const char *attached_names[STRING_SIZE]; 4011 struct nvme_bdev *bdev; 4012 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 4013 struct nvme_bdev_io *first_bio; 4014 struct spdk_io_channel *ch1, *ch2; 4015 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 4016 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 4017 int rc; 4018 4019 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4020 ut_init_trid(&path1.trid); 4021 ut_init_trid2(&path2.trid); 4022 g_ut_attach_ctrlr_status = 0; 4023 g_ut_attach_bdev_count = 1; 4024 4025 set_thread(0); 4026 4027 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4028 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4029 4030 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4031 attach_ctrlr_done, NULL, NULL, NULL, true); 4032 CU_ASSERT(rc == 0); 4033 4034 spdk_delay_us(1000); 4035 poll_threads(); 4036 4037 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4038 poll_threads(); 4039 4040 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4041 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4042 4043 rc =
bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4044 attach_ctrlr_done, NULL, NULL, NULL, true); 4045 CU_ASSERT(rc == 0); 4046 4047 spdk_delay_us(1000); 4048 poll_threads(); 4049 4050 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4051 poll_threads(); 4052 4053 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4054 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4055 4056 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4057 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4058 4059 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4060 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4061 4062 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4063 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4064 4065 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4066 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4067 4068 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4069 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4070 4071 set_thread(0); 4072 4073 ch1 = spdk_get_io_channel(bdev); 4074 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4075 4076 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4077 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4078 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4079 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4080 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4081 4082 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4083 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4084 4085 set_thread(1); 4086 4087 ch2 = spdk_get_io_channel(bdev); 4088 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4089 4090 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4091 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4092 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4093 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4094 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4095 4096 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4097 4098 /* The first reset request from bdev_io is submitted on thread 0. 4099 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4100 * 4101 * A few extra polls are necessary after resetting ctrlr1 to check 4102 * pending reset requests for ctrlr1. 
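 *
 * Roughly, the expected per-ctrlr sequence is: disconnect the I/O qpair on
 * each thread, clear is_failed and disconnect the adminq, reconnect the
 * adminq, re-create the I/O qpair on each thread, and finally clear the
 * resetting flag and move first_bio to the next path.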
4103 */ 4104 ctrlr1->is_failed = true; 4105 curr_path1->last_failed_tsc = spdk_get_ticks(); 4106 ctrlr2->is_failed = true; 4107 curr_path2->last_failed_tsc = spdk_get_ticks(); 4108 4109 set_thread(0); 4110 4111 bdev_nvme_submit_request(ch1, first_bdev_io); 4112 CU_ASSERT(first_bio->io_path == io_path11); 4113 CU_ASSERT(nvme_ctrlr1->resetting == true); 4114 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4115 4116 poll_thread_times(0, 3); 4117 CU_ASSERT(io_path11->qpair->qpair == NULL); 4118 CU_ASSERT(io_path21->qpair->qpair != NULL); 4119 4120 poll_thread_times(1, 2); 4121 CU_ASSERT(io_path11->qpair->qpair == NULL); 4122 CU_ASSERT(io_path21->qpair->qpair == NULL); 4123 CU_ASSERT(ctrlr1->is_failed == true); 4124 4125 poll_thread_times(0, 1); 4126 CU_ASSERT(nvme_ctrlr1->resetting == true); 4127 CU_ASSERT(ctrlr1->is_failed == false); 4128 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4129 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4130 4131 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4132 poll_thread_times(0, 2); 4133 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4134 4135 poll_thread_times(0, 1); 4136 CU_ASSERT(io_path11->qpair->qpair != NULL); 4137 CU_ASSERT(io_path21->qpair->qpair == NULL); 4138 4139 poll_thread_times(1, 1); 4140 CU_ASSERT(io_path11->qpair->qpair != NULL); 4141 CU_ASSERT(io_path21->qpair->qpair != NULL); 4142 4143 poll_thread_times(0, 2); 4144 CU_ASSERT(nvme_ctrlr1->resetting == true); 4145 poll_thread_times(1, 1); 4146 CU_ASSERT(nvme_ctrlr1->resetting == true); 4147 poll_thread_times(0, 2); 4148 CU_ASSERT(nvme_ctrlr1->resetting == false); 4149 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4150 CU_ASSERT(first_bio->io_path == io_path12); 4151 CU_ASSERT(nvme_ctrlr2->resetting == true); 4152 4153 poll_thread_times(0, 3); 4154 CU_ASSERT(io_path12->qpair->qpair == NULL); 4155 CU_ASSERT(io_path22->qpair->qpair != NULL); 4156 4157 poll_thread_times(1, 2); 4158 CU_ASSERT(io_path12->qpair->qpair == NULL); 4159 CU_ASSERT(io_path22->qpair->qpair == NULL); 4160 CU_ASSERT(ctrlr2->is_failed == true); 4161 4162 poll_thread_times(0, 1); 4163 CU_ASSERT(nvme_ctrlr2->resetting == true); 4164 CU_ASSERT(ctrlr2->is_failed == false); 4165 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4166 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4167 4168 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4169 poll_thread_times(0, 2); 4170 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4171 4172 poll_thread_times(0, 1); 4173 CU_ASSERT(io_path12->qpair->qpair != NULL); 4174 CU_ASSERT(io_path22->qpair->qpair == NULL); 4175 4176 poll_thread_times(1, 2); 4177 CU_ASSERT(io_path12->qpair->qpair != NULL); 4178 CU_ASSERT(io_path22->qpair->qpair != NULL); 4179 4180 poll_thread_times(0, 2); 4181 CU_ASSERT(nvme_ctrlr2->resetting == true); 4182 poll_thread_times(1, 1); 4183 CU_ASSERT(nvme_ctrlr2->resetting == true); 4184 poll_thread_times(0, 2); 4185 CU_ASSERT(first_bio->io_path == NULL); 4186 CU_ASSERT(nvme_ctrlr2->resetting == false); 4187 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4188 4189 poll_threads(); 4190 4191 /* There is a race between two reset requests from bdev_io. 4192 * 4193 * The first reset request is submitted on thread 0, and the second reset 4194 * request is submitted on thread 1 while the first is resetting ctrlr1. 4195 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4196 * both reset requests go to ctrlr2. The first comes earlier than the second. 4197 * The second is pending on ctrlr2 again. 
After the first completes resetting 4198 * ctrlr2, both complete successfully. 4199 */ 4200 ctrlr1->is_failed = true; 4201 curr_path1->last_failed_tsc = spdk_get_ticks(); 4202 ctrlr2->is_failed = true; 4203 curr_path2->last_failed_tsc = spdk_get_ticks(); 4204 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4205 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4206 4207 set_thread(0); 4208 4209 bdev_nvme_submit_request(ch1, first_bdev_io); 4210 4211 set_thread(1); 4212 4213 bdev_nvme_submit_request(ch2, second_bdev_io); 4214 4215 CU_ASSERT(nvme_ctrlr1->resetting == true); 4216 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4217 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == 4218 (struct nvme_bdev_io *)second_bdev_io->driver_ctx); 4219 4220 poll_threads(); 4221 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4222 poll_threads(); 4223 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4224 poll_threads(); 4225 4226 CU_ASSERT(ctrlr1->is_failed == false); 4227 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4228 CU_ASSERT(ctrlr2->is_failed == false); 4229 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4230 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4231 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4232 4233 set_thread(0); 4234 4235 spdk_put_io_channel(ch1); 4236 4237 set_thread(1); 4238 4239 spdk_put_io_channel(ch2); 4240 4241 poll_threads(); 4242 4243 set_thread(0); 4244 4245 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4246 CU_ASSERT(rc == 0); 4247 4248 poll_threads(); 4249 spdk_delay_us(1000); 4250 poll_threads(); 4251 4252 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4253 4254 free(first_bdev_io); 4255 free(second_bdev_io); 4256 } 4257 4258 static void 4259 test_find_io_path(void) 4260 { 4261 struct nvme_bdev_channel nbdev_ch = { 4262 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4263 }; 4264 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4265 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4266 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4267 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4268 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4269 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4270 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 4271 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4272 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4273 4274 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4275 4276 /* Test if io_path whose ANA state is not accessible is excluded.
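 * bdev_nvme_find_io_path() should treat only OPTIMIZED and NON_OPTIMIZED as
 * usable ANA states; INACCESSIBLE, PERSISTENT_LOSS, and CHANGE should each
 * cause the path to be skipped.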
*/ 4277 4278 nvme_qpair1.qpair = &qpair1; 4279 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4280 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4281 4282 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4283 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4284 4285 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4286 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4287 4288 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4289 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4290 4291 nbdev_ch.current_io_path = NULL; 4292 4293 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4294 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4295 4296 nbdev_ch.current_io_path = NULL; 4297 4298 /* Test if io_path whose qpair is resetting is excluded. */ 4299 4300 nvme_qpair1.qpair = NULL; 4301 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4302 4303 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4304 4305 /* Test if ANA optimized state or the first found ANA non-optimized state 4306 * is prioritized. 4307 */ 4308 4309 nvme_qpair1.qpair = &qpair1; 4310 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4311 nvme_qpair2.qpair = &qpair2; 4312 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4313 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4314 4315 nbdev_ch.current_io_path = NULL; 4316 4317 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4318 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4319 4320 nbdev_ch.current_io_path = NULL; 4321 } 4322 4323 static void 4324 test_retry_io_if_ana_state_is_updating(void) 4325 { 4326 struct nvme_path_id path = {}; 4327 struct nvme_ctrlr_opts opts = {}; 4328 struct spdk_nvme_ctrlr *ctrlr; 4329 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4330 struct nvme_ctrlr *nvme_ctrlr; 4331 const int STRING_SIZE = 32; 4332 const char *attached_names[STRING_SIZE]; 4333 struct nvme_bdev *bdev; 4334 struct nvme_ns *nvme_ns; 4335 struct spdk_bdev_io *bdev_io1; 4336 struct spdk_io_channel *ch; 4337 struct nvme_bdev_channel *nbdev_ch; 4338 struct nvme_io_path *io_path; 4339 struct nvme_qpair *nvme_qpair; 4340 int rc; 4341 4342 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4343 ut_init_trid(&path.trid); 4344 4345 set_thread(0); 4346 4347 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4348 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4349 4350 g_ut_attach_ctrlr_status = 0; 4351 g_ut_attach_bdev_count = 1; 4352 4353 opts.ctrlr_loss_timeout_sec = -1; 4354 opts.reconnect_delay_sec = 1; 4355 4356 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4357 attach_ctrlr_done, NULL, NULL, &opts, false); 4358 CU_ASSERT(rc == 0); 4359 4360 spdk_delay_us(1000); 4361 poll_threads(); 4362 4363 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4364 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4365 4366 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4367 CU_ASSERT(nvme_ctrlr != NULL); 4368 4369 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4370 CU_ASSERT(bdev != NULL); 4371 4372 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4373 CU_ASSERT(nvme_ns != NULL); 4374 4375 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4376 ut_bdev_io_set_buf(bdev_io1); 4377 4378 ch = spdk_get_io_channel(bdev); 4379 SPDK_CU_ASSERT_FATAL(ch != NULL); 4380 4381 nbdev_ch = spdk_io_channel_get_ctx(ch); 4382 4383 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4384 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4385 4386 nvme_qpair = 
io_path->qpair; 4387 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4388 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4389 4390 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4391 4392 /* If qpair is connected, I/O should succeed. */ 4393 bdev_io1->internal.in_submit_request = true; 4394 4395 bdev_nvme_submit_request(ch, bdev_io1); 4396 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4397 4398 poll_threads(); 4399 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4400 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4401 4402 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4403 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4404 nbdev_ch->current_io_path = NULL; 4405 4406 bdev_io1->internal.in_submit_request = true; 4407 4408 bdev_nvme_submit_request(ch, bdev_io1); 4409 4410 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4411 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4412 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4413 4414 /* ANA state became accessible while I/O was queued. */ 4415 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4416 4417 spdk_delay_us(1000000); 4418 4419 poll_thread_times(0, 1); 4420 4421 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4422 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4423 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4424 4425 poll_threads(); 4426 4427 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4428 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4429 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4430 4431 free(bdev_io1); 4432 4433 spdk_put_io_channel(ch); 4434 4435 poll_threads(); 4436 4437 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4438 CU_ASSERT(rc == 0); 4439 4440 poll_threads(); 4441 spdk_delay_us(1000); 4442 poll_threads(); 4443 4444 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4445 } 4446 4447 static void 4448 test_retry_io_for_io_path_error(void) 4449 { 4450 struct nvme_path_id path1 = {}, path2 = {}; 4451 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4452 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4453 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4454 const int STRING_SIZE = 32; 4455 const char *attached_names[STRING_SIZE]; 4456 struct nvme_bdev *bdev; 4457 struct nvme_ns *nvme_ns1, *nvme_ns2; 4458 struct spdk_bdev_io *bdev_io; 4459 struct nvme_bdev_io *bio; 4460 struct spdk_io_channel *ch; 4461 struct nvme_bdev_channel *nbdev_ch; 4462 struct nvme_io_path *io_path1, *io_path2; 4463 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4464 struct ut_nvme_req *req; 4465 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4466 int rc; 4467 4468 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4469 ut_init_trid(&path1.trid); 4470 ut_init_trid2(&path2.trid); 4471 4472 g_opts.bdev_retry_count = 1; 4473 4474 set_thread(0); 4475 4476 g_ut_attach_ctrlr_status = 0; 4477 g_ut_attach_bdev_count = 1; 4478 4479 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4480 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4481 4482 ctrlr1->ns[0].uuid = &uuid1; 4483 4484 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4485 attach_ctrlr_done, NULL, NULL, NULL, true); 4486 CU_ASSERT(rc == 0); 4487 4488 spdk_delay_us(1000); 4489 poll_threads(); 4490 4491 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4492 poll_threads(); 4493 4494 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4495 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr !=
NULL); 4496 4497 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4498 CU_ASSERT(nvme_ctrlr1 != NULL); 4499 4500 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4501 CU_ASSERT(bdev != NULL); 4502 4503 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4504 CU_ASSERT(nvme_ns1 != NULL); 4505 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4506 4507 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4508 ut_bdev_io_set_buf(bdev_io); 4509 4510 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4511 4512 ch = spdk_get_io_channel(bdev); 4513 SPDK_CU_ASSERT_FATAL(ch != NULL); 4514 4515 nbdev_ch = spdk_io_channel_get_ctx(ch); 4516 4517 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4518 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4519 4520 nvme_qpair1 = io_path1->qpair; 4521 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4522 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4523 4524 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4525 4526 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4527 bdev_io->internal.in_submit_request = true; 4528 4529 bdev_nvme_submit_request(ch, bdev_io); 4530 4531 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4532 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4533 4534 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4535 SPDK_CU_ASSERT_FATAL(req != NULL); 4536 4537 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4538 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4539 req->cpl.status.dnr = 1; 4540 4541 poll_thread_times(0, 1); 4542 4543 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4544 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4545 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4546 4547 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4548 bdev_io->internal.in_submit_request = true; 4549 4550 bdev_nvme_submit_request(ch, bdev_io); 4551 4552 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4553 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4554 4555 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4556 SPDK_CU_ASSERT_FATAL(req != NULL); 4557 4558 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4559 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4560 4561 poll_thread_times(0, 1); 4562 4563 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4564 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4565 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4566 4567 poll_threads(); 4568 4569 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4570 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4571 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4572 4573 /* Add io_path2 dynamically, and create a multipath configuration. 
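 * The already-open nbdev_ch should pick up io_path2 automatically, giving
 * the retry logic below an alternative path once the qpair of io_path1 is
 * gone.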
*/ 4574 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4575 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4576 4577 ctrlr2->ns[0].uuid = &uuid1; 4578 4579 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4580 attach_ctrlr_done, NULL, NULL, NULL, true); 4581 CU_ASSERT(rc == 0); 4582 4583 spdk_delay_us(1000); 4584 poll_threads(); 4585 4586 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4587 poll_threads(); 4588 4589 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4590 CU_ASSERT(nvme_ctrlr2 != NULL); 4591 4592 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4593 CU_ASSERT(nvme_ns2 != NULL); 4594 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4595 4596 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4597 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4598 4599 nvme_qpair2 = io_path2->qpair; 4600 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4601 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4602 4603 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4604 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4605 * So after a retry, I/O is submitted to io_path2 and should succeed. 4606 */ 4607 bdev_io->internal.in_submit_request = true; 4608 4609 bdev_nvme_submit_request(ch, bdev_io); 4610 4611 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4612 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4613 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4614 4615 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4616 SPDK_CU_ASSERT_FATAL(req != NULL); 4617 4618 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4619 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4620 4621 poll_thread_times(0, 1); 4622 4623 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4624 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4625 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4626 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4627 4628 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4629 nvme_qpair1->qpair = NULL; 4630 4631 poll_threads(); 4632 4633 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4634 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4635 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4636 4637 free(bdev_io); 4638 4639 spdk_put_io_channel(ch); 4640 4641 poll_threads(); 4642 4643 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4644 CU_ASSERT(rc == 0); 4645 4646 poll_threads(); 4647 spdk_delay_us(1000); 4648 poll_threads(); 4649 4650 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4651 4652 g_opts.bdev_retry_count = 0; 4653 } 4654 4655 static void 4656 test_retry_io_count(void) 4657 { 4658 struct nvme_path_id path = {}; 4659 struct spdk_nvme_ctrlr *ctrlr; 4660 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4661 struct nvme_ctrlr *nvme_ctrlr; 4662 const int STRING_SIZE = 32; 4663 const char *attached_names[STRING_SIZE]; 4664 struct nvme_bdev *bdev; 4665 struct nvme_ns *nvme_ns; 4666 struct spdk_bdev_io *bdev_io; 4667 struct nvme_bdev_io *bio; 4668 struct spdk_io_channel *ch; 4669 struct nvme_bdev_channel *nbdev_ch; 4670 struct nvme_io_path *io_path; 4671 struct nvme_qpair *nvme_qpair; 4672 struct ut_nvme_req *req; 4673 int rc; 4674 4675 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4676 ut_init_trid(&path.trid); 4677 4678 set_thread(0); 4679 4680 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4681 SPDK_CU_ASSERT_FATAL(ctrlr != 
NULL); 4682 4683 g_ut_attach_ctrlr_status = 0; 4684 g_ut_attach_bdev_count = 1; 4685 4686 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4687 attach_ctrlr_done, NULL, NULL, NULL, false); 4688 CU_ASSERT(rc == 0); 4689 4690 spdk_delay_us(1000); 4691 poll_threads(); 4692 4693 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4694 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4695 4696 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4697 CU_ASSERT(nvme_ctrlr != NULL); 4698 4699 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4700 CU_ASSERT(bdev != NULL); 4701 4702 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4703 CU_ASSERT(nvme_ns != NULL); 4704 4705 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4706 ut_bdev_io_set_buf(bdev_io); 4707 4708 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4709 4710 ch = spdk_get_io_channel(bdev); 4711 SPDK_CU_ASSERT_FATAL(ch != NULL); 4712 4713 nbdev_ch = spdk_io_channel_get_ctx(ch); 4714 4715 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4716 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4717 4718 nvme_qpair = io_path->qpair; 4719 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4720 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4721 4722 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4723 4724 /* If I/O is aborted by request, it should not be retried. */ 4725 g_opts.bdev_retry_count = 1; 4726 4727 bdev_io->internal.in_submit_request = true; 4728 4729 bdev_nvme_submit_request(ch, bdev_io); 4730 4731 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4732 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4733 4734 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4735 SPDK_CU_ASSERT_FATAL(req != NULL); 4736 4737 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4738 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4739 4740 poll_thread_times(0, 1); 4741 4742 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4743 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4744 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4745 4746 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4747 * the failed I/O should not be retried. 4748 */ 4749 g_opts.bdev_retry_count = 4; 4750 4751 bdev_io->internal.in_submit_request = true; 4752 4753 bdev_nvme_submit_request(ch, bdev_io); 4754 4755 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4756 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4757 4758 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4759 SPDK_CU_ASSERT_FATAL(req != NULL); 4760 4761 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4762 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4763 bio->retry_count = 4; 4764 4765 poll_thread_times(0, 1); 4766 4767 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4768 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4769 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4770 4771 /* If g_opts.bdev_retry_count is -1, the failed I/O should always be retried.
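 * In other words, -1 disables the limit entirely; bio->retry_count is not
 * compared against it, as verified below with retry_count already at 4.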
*/ 4772 g_opts.bdev_retry_count = -1; 4773 4774 bdev_io->internal.in_submit_request = true; 4775 4776 bdev_nvme_submit_request(ch, bdev_io); 4777 4778 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4779 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4780 4781 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4782 SPDK_CU_ASSERT_FATAL(req != NULL); 4783 4784 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4785 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4786 bio->retry_count = 4; 4787 4788 poll_thread_times(0, 1); 4789 4790 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4791 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4792 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4793 4794 poll_threads(); 4795 4796 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4797 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4798 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4799 4800 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4801 * the failed I/O should be retried. 4802 */ 4803 g_opts.bdev_retry_count = 4; 4804 4805 bdev_io->internal.in_submit_request = true; 4806 4807 bdev_nvme_submit_request(ch, bdev_io); 4808 4809 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4810 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4811 4812 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4813 SPDK_CU_ASSERT_FATAL(req != NULL); 4814 4815 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4816 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4817 bio->retry_count = 3; 4818 4819 poll_thread_times(0, 1); 4820 4821 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4822 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4823 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4824 4825 poll_threads(); 4826 4827 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4828 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4829 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4830 4831 free(bdev_io); 4832 4833 spdk_put_io_channel(ch); 4834 4835 poll_threads(); 4836 4837 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4838 CU_ASSERT(rc == 0); 4839 4840 poll_threads(); 4841 spdk_delay_us(1000); 4842 poll_threads(); 4843 4844 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4845 4846 g_opts.bdev_retry_count = 0; 4847 } 4848 4849 static void 4850 test_concurrent_read_ana_log_page(void) 4851 { 4852 struct spdk_nvme_transport_id trid = {}; 4853 struct spdk_nvme_ctrlr *ctrlr; 4854 struct nvme_ctrlr *nvme_ctrlr; 4855 const int STRING_SIZE = 32; 4856 const char *attached_names[STRING_SIZE]; 4857 int rc; 4858 4859 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4860 ut_init_trid(&trid); 4861 4862 set_thread(0); 4863 4864 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4865 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4866 4867 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4868 4869 g_ut_attach_ctrlr_status = 0; 4870 g_ut_attach_bdev_count = 1; 4871 4872 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4873 attach_ctrlr_done, NULL, NULL, NULL, false); 4874 CU_ASSERT(rc == 0); 4875 4876 spdk_delay_us(1000); 4877 poll_threads(); 4878 4879 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4880 poll_threads(); 4881 4882 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4883 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4884 4885 
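/* nvme_ctrlr_read_ana_log_page() is expected to be single-flight: the
 * ana_log_page_updating flag acts as a busy marker, so reads issued while
 * one is already outstanding (even from another thread) should be dropped
 * rather than queued, keeping adminq.num_outstanding_reqs at 1. */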
nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4886 4887 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4888 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4889 4890 /* Following read request should be rejected. */ 4891 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4892 4893 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4894 4895 set_thread(1); 4896 4897 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4898 4899 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4900 4901 /* Reset request while reading ANA log page should not be rejected. */ 4902 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4903 CU_ASSERT(rc == 0); 4904 4905 poll_threads(); 4906 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4907 poll_threads(); 4908 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4909 poll_threads(); 4910 4911 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4912 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4913 4914 /* Read ANA log page while resetting ctrlr should be rejected. */ 4915 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4916 CU_ASSERT(rc == 0); 4917 4918 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4919 4920 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4921 4922 poll_threads(); 4923 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4924 poll_threads(); 4925 4926 set_thread(0); 4927 4928 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4929 CU_ASSERT(rc == 0); 4930 4931 poll_threads(); 4932 spdk_delay_us(1000); 4933 poll_threads(); 4934 4935 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4936 } 4937 4938 static void 4939 test_retry_io_for_ana_error(void) 4940 { 4941 struct nvme_path_id path = {}; 4942 struct spdk_nvme_ctrlr *ctrlr; 4943 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4944 struct nvme_ctrlr *nvme_ctrlr; 4945 const int STRING_SIZE = 32; 4946 const char *attached_names[STRING_SIZE]; 4947 struct nvme_bdev *bdev; 4948 struct nvme_ns *nvme_ns; 4949 struct spdk_bdev_io *bdev_io; 4950 struct nvme_bdev_io *bio; 4951 struct spdk_io_channel *ch; 4952 struct nvme_bdev_channel *nbdev_ch; 4953 struct nvme_io_path *io_path; 4954 struct nvme_qpair *nvme_qpair; 4955 struct ut_nvme_req *req; 4956 uint64_t now; 4957 int rc; 4958 4959 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4960 ut_init_trid(&path.trid); 4961 4962 g_opts.bdev_retry_count = 1; 4963 4964 set_thread(0); 4965 4966 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4967 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4968 4969 g_ut_attach_ctrlr_status = 0; 4970 g_ut_attach_bdev_count = 1; 4971 4972 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4973 attach_ctrlr_done, NULL, NULL, NULL, false); 4974 CU_ASSERT(rc == 0); 4975 4976 spdk_delay_us(1000); 4977 poll_threads(); 4978 4979 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4980 poll_threads(); 4981 4982 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4983 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4984 4985 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4986 CU_ASSERT(nvme_ctrlr != NULL); 4987 4988 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4989 CU_ASSERT(bdev != NULL); 4990 4991 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4992 CU_ASSERT(nvme_ns != NULL); 4993 4994 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4995 ut_bdev_io_set_buf(bdev_io); 4996 4997 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4998 4999 ch = spdk_get_io_channel(bdev); 5000 SPDK_CU_ASSERT_FATAL(ch != NULL); 5001 5002 nbdev_ch = spdk_io_channel_get_ctx(ch); 5003 5004 io_path = 
ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5005 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5006 5007 nvme_qpair = io_path->qpair; 5008 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5009 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5010 5011 now = spdk_get_ticks(); 5012 5013 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 5014 5015 /* If an I/O gets an ANA error, it should be queued, the corresponding namespace 5016 * should be frozen, and its ANA state should be updated. 5017 */ 5018 bdev_io->internal.in_submit_request = true; 5019 5020 bdev_nvme_submit_request(ch, bdev_io); 5021 5022 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5023 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5024 5025 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 5026 SPDK_CU_ASSERT_FATAL(req != NULL); 5027 5028 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5029 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 5030 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5031 5032 poll_thread_times(0, 1); 5033 5034 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5035 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5036 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5037 /* I/O should be retried immediately. */ 5038 CU_ASSERT(bio->retry_ticks == now); 5039 CU_ASSERT(nvme_ns->ana_state_updating == true); 5040 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5041 5042 poll_threads(); 5043 5044 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5045 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5046 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5047 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5048 /* The I/O should be retried after a second if no I/O path was found but 5049 * one may become available. 5050 */ 5051 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5052 5053 /* The namespace should be unfrozen after its ANA state update completes. */ 5054 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5055 poll_threads(); 5056 5057 CU_ASSERT(nvme_ns->ana_state_updating == false); 5058 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5059 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5060 5061 /* Retrying the queued I/O should succeed. */ 5062 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5063 poll_threads(); 5064 5065 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5066 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5067 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5068 5069 free(bdev_io); 5070 5071 spdk_put_io_channel(ch); 5072 5073 poll_threads(); 5074 5075 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5076 CU_ASSERT(rc == 0); 5077 5078 poll_threads(); 5079 spdk_delay_us(1000); 5080 poll_threads(); 5081 5082 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5083 5084 g_opts.bdev_retry_count = 0; 5085 } 5086 5087 static void 5088 test_check_io_error_resiliency_params(void) 5089 { 5090 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5091 * 3rd parameter is fast_io_fail_timeout_sec.
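 *
 * Summarizing the expectations encoded below: ctrlr_loss_timeout_sec must be
 * >= -1 and reconnect_delay_sec must be zero iff ctrlr_loss_timeout_sec is
 * zero; a positive ctrlr_loss_timeout_sec must not be smaller than
 * reconnect_delay_sec; and a non-zero fast_io_fail_timeout_sec requires a
 * non-zero reconnect_delay_sec, must be at least reconnect_delay_sec and,
 * when ctrlr_loss_timeout_sec is positive, at most ctrlr_loss_timeout_sec.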
5092 */ 5093 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5094 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5095 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5096 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5097 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5098 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5099 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5100 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5101 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5102 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5103 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5104 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5105 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5106 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5107 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5108 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5109 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5110 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5111 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5112 } 5113 5114 static void 5115 test_retry_io_if_ctrlr_is_resetting(void) 5116 { 5117 struct nvme_path_id path = {}; 5118 struct nvme_ctrlr_opts opts = {}; 5119 struct spdk_nvme_ctrlr *ctrlr; 5120 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5121 struct nvme_ctrlr *nvme_ctrlr; 5122 const int STRING_SIZE = 32; 5123 const char *attached_names[STRING_SIZE]; 5124 struct nvme_bdev *bdev; 5125 struct nvme_ns *nvme_ns; 5126 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5127 struct spdk_io_channel *ch; 5128 struct nvme_bdev_channel *nbdev_ch; 5129 struct nvme_io_path *io_path; 5130 struct nvme_qpair *nvme_qpair; 5131 int rc; 5132 5133 g_opts.bdev_retry_count = 1; 5134 5135 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5136 ut_init_trid(&path.trid); 5137 5138 set_thread(0); 5139 5140 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5141 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5142 5143 g_ut_attach_ctrlr_status = 0; 5144 g_ut_attach_bdev_count = 1; 5145 5146 opts.ctrlr_loss_timeout_sec = -1; 5147 opts.reconnect_delay_sec = 1; 5148 5149 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5150 attach_ctrlr_done, NULL, NULL, &opts, false); 5151 CU_ASSERT(rc == 0); 5152 5153 spdk_delay_us(1000); 5154 poll_threads(); 5155 5156 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5157 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5158 5159 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5160 CU_ASSERT(nvme_ctrlr != NULL); 5161 5162 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5163 CU_ASSERT(bdev != NULL); 5164 5165 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5166 CU_ASSERT(nvme_ns != NULL); 5167 5168 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5169 ut_bdev_io_set_buf(bdev_io1); 5170 5171 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5172 ut_bdev_io_set_buf(bdev_io2); 5173 5174 ch = spdk_get_io_channel(bdev); 5175 SPDK_CU_ASSERT_FATAL(ch != NULL); 5176 5177 nbdev_ch = 
spdk_io_channel_get_ctx(ch); 5178 5179 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5180 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5181 5182 nvme_qpair = io_path->qpair; 5183 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5184 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5185 5186 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5187 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5188 5189 /* If qpair is connected, I/O should succeed. */ 5190 bdev_io1->internal.in_submit_request = true; 5191 5192 bdev_nvme_submit_request(ch, bdev_io1); 5193 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5194 5195 poll_threads(); 5196 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5197 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5198 5199 /* If qpair is disconnected, it is freed and then reconnected via resetting 5200 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5201 * while resetting the nvme_ctrlr. 5202 */ 5203 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5204 ctrlr->is_failed = true; 5205 5206 poll_thread_times(0, 5); 5207 5208 CU_ASSERT(nvme_qpair->qpair == NULL); 5209 CU_ASSERT(nvme_ctrlr->resetting == true); 5210 CU_ASSERT(ctrlr->is_failed == false); 5211 5212 bdev_io1->internal.in_submit_request = true; 5213 5214 bdev_nvme_submit_request(ch, bdev_io1); 5215 5216 spdk_delay_us(1); 5217 5218 bdev_io2->internal.in_submit_request = true; 5219 5220 bdev_nvme_submit_request(ch, bdev_io2); 5221 5222 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5223 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5224 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5225 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx( 5226 TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx, 5227 retry_link))); 5228 5229 poll_threads(); 5230 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5231 poll_threads(); 5232 5233 CU_ASSERT(nvme_qpair->qpair != NULL); 5234 CU_ASSERT(nvme_ctrlr->resetting == false); 5235 5236 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5237 5238 poll_thread_times(0, 1); 5239 5240 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5241 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5242 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5243 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5244 5245 poll_threads(); 5246 5247 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5248 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5249 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5250 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5251 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5252 5253 spdk_delay_us(1); 5254 5255 poll_thread_times(0, 1); 5256 5257 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5258 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5259 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5260 5261 poll_threads(); 5262 5263 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5264 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5265 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5266 5267 free(bdev_io1); 5268 free(bdev_io2); 5269 5270 spdk_put_io_channel(ch); 5271 5272 poll_threads(); 5273 5274 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5275 CU_ASSERT(rc == 0); 5276 5277 poll_threads(); 5278
spdk_delay_us(1000); 5279 poll_threads(); 5280 5281 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5282 5283 g_opts.bdev_retry_count = 0; 5284 } 5285 5286 static void 5287 test_reconnect_ctrlr(void) 5288 { 5289 struct spdk_nvme_transport_id trid = {}; 5290 struct spdk_nvme_ctrlr ctrlr = {}; 5291 struct nvme_ctrlr *nvme_ctrlr; 5292 struct spdk_io_channel *ch1, *ch2; 5293 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5294 int rc; 5295 5296 ut_init_trid(&trid); 5297 TAILQ_INIT(&ctrlr.active_io_qpairs); 5298 5299 set_thread(0); 5300 5301 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5302 CU_ASSERT(rc == 0); 5303 5304 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5305 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5306 5307 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5308 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5309 5310 ch1 = spdk_get_io_channel(nvme_ctrlr); 5311 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5312 5313 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5314 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5315 5316 set_thread(1); 5317 5318 ch2 = spdk_get_io_channel(nvme_ctrlr); 5319 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5320 5321 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5322 5323 /* Reset starts from thread 1. */ 5324 set_thread(1); 5325 5326 /* The reset should fail and a reconnect timer should be registered. */ 5327 ctrlr.fail_reset = true; 5328 ctrlr.is_failed = true; 5329 5330 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5331 CU_ASSERT(rc == 0); 5332 CU_ASSERT(nvme_ctrlr->resetting == true); 5333 CU_ASSERT(ctrlr.is_failed == true); 5334 5335 poll_threads(); 5336 5337 CU_ASSERT(nvme_ctrlr->resetting == false); 5338 CU_ASSERT(ctrlr.is_failed == false); 5339 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5340 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5341 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5342 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5343 5344 /* A new reset starts from thread 1. */ 5345 set_thread(1); 5346 5347 /* The reset should cancel the reconnect timer and should start from reconnection. 5348 * Then, the reset should fail and a reconnect timer should be registered again. 5349 */ 5350 ctrlr.fail_reset = true; 5351 ctrlr.is_failed = true; 5352 5353 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5354 CU_ASSERT(rc == 0); 5355 CU_ASSERT(nvme_ctrlr->resetting == true); 5356 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5357 CU_ASSERT(ctrlr.is_failed == true); 5358 5359 poll_threads(); 5360 5361 CU_ASSERT(nvme_ctrlr->resetting == false); 5362 CU_ASSERT(ctrlr.is_failed == false); 5363 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5364 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5365 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5366 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5367 5368 /* Then a reconnect retry should succeed. */ 5369 ctrlr.fail_reset = false; 5370 5371 spdk_delay_us(SPDK_SEC_TO_USEC); 5372 poll_thread_times(0, 1); 5373 5374 CU_ASSERT(nvme_ctrlr->resetting == true); 5375 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5376 5377 poll_threads(); 5378 5379 CU_ASSERT(nvme_ctrlr->resetting == false); 5380 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5381 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5382 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5383 5384 /* The reset should fail and a reconnect timer should be registered.
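 * From here the reconnect retries are left failing so that
 * ctrlr_loss_timeout_sec (2 seconds here) can elapse; once it does, the
 * next failed retry should mark the ctrlr for deletion (destruct == true).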
*/ 5385 ctrlr.fail_reset = true; 5386 ctrlr.is_failed = true; 5387 5388 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5389 CU_ASSERT(rc == 0); 5390 CU_ASSERT(nvme_ctrlr->resetting == true); 5391 CU_ASSERT(ctrlr.is_failed == true); 5392 5393 poll_threads(); 5394 5395 CU_ASSERT(nvme_ctrlr->resetting == false); 5396 CU_ASSERT(ctrlr.is_failed == false); 5397 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5398 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5399 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5400 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5401 5402 /* Then a reconnect retry should still fail. */ 5403 spdk_delay_us(SPDK_SEC_TO_USEC); 5404 poll_thread_times(0, 1); 5405 5406 CU_ASSERT(nvme_ctrlr->resetting == true); 5407 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5408 5409 poll_threads(); 5410 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5411 poll_threads(); 5412 5413 CU_ASSERT(nvme_ctrlr->resetting == false); 5414 CU_ASSERT(ctrlr.is_failed == false); 5415 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5416 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5417 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5418 5419 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5420 spdk_delay_us(SPDK_SEC_TO_USEC); 5421 poll_threads(); 5422 5423 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5424 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5425 CU_ASSERT(nvme_ctrlr->destruct == true); 5426 5427 spdk_put_io_channel(ch2); 5428 5429 set_thread(0); 5430 5431 spdk_put_io_channel(ch1); 5432 5433 poll_threads(); 5434 spdk_delay_us(1000); 5435 poll_threads(); 5436 5437 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5438 } 5439 5440 static struct nvme_path_id * 5441 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5442 const struct spdk_nvme_transport_id *trid) 5443 { 5444 struct nvme_path_id *p; 5445 5446 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5447 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5448 break; 5449 } 5450 } 5451 5452 return p; 5453 } 5454 5455 static void 5456 test_retry_failover_ctrlr(void) 5457 { 5458 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5459 struct spdk_nvme_ctrlr ctrlr = {}; 5460 struct nvme_ctrlr *nvme_ctrlr = NULL; 5461 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5462 struct spdk_io_channel *ch; 5463 struct nvme_ctrlr_channel *ctrlr_ch; 5464 int rc; 5465 5466 ut_init_trid(&trid1); 5467 ut_init_trid2(&trid2); 5468 ut_init_trid3(&trid3); 5469 TAILQ_INIT(&ctrlr.active_io_qpairs); 5470 5471 set_thread(0); 5472 5473 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5474 CU_ASSERT(rc == 0); 5475 5476 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5477 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5478 5479 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5480 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5481 5482 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5483 CU_ASSERT(rc == 0); 5484 5485 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5486 CU_ASSERT(rc == 0); 5487 5488 ch = spdk_get_io_channel(nvme_ctrlr); 5489 SPDK_CU_ASSERT_FATAL(ch != NULL); 5490 5491 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5492 5493 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5494 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5495 CU_ASSERT(path_id1->last_failed_tsc == 0); 5496 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5497 5498 /* Even if reset fails and reconnect is scheduled, all path_ids are marked failed and the active path_id stays trid1 until it is explicitly removed below. */ 5499 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5500 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5501 5502 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5503 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5504 5505 /* It is expected that connecting to all of trid1, trid2, and trid3 fails, 5506 * and a reconnect timer is started. */ 5507 ctrlr.fail_reset = true; 5508 ctrlr.is_failed = true; 5509 5510 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5511 CU_ASSERT(rc == 0); 5512 5513 poll_threads(); 5514 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5515 poll_threads(); 5516 5517 CU_ASSERT(nvme_ctrlr->resetting == false); 5518 CU_ASSERT(ctrlr.is_failed == false); 5519 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5520 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5521 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5522 CU_ASSERT(path_id1->last_failed_tsc != 0); 5523 5524 CU_ASSERT(path_id2->last_failed_tsc != 0); 5525 CU_ASSERT(path_id3->last_failed_tsc != 0); 5526 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5527 5528 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5529 * switched to trid2 but reset is not started. 5530 */ 5531 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5532 CU_ASSERT(rc == -EALREADY); 5533 5534 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5535 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5536 5537 CU_ASSERT(nvme_ctrlr->resetting == false); 5538 5539 /* If reconnect succeeds, trid2 should be the active path_id */ 5540 ctrlr.fail_reset = false; 5541 5542 spdk_delay_us(SPDK_SEC_TO_USEC); 5543 poll_thread_times(0, 1); 5544 5545 CU_ASSERT(nvme_ctrlr->resetting == true); 5546 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5547 5548 poll_threads(); 5549 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5550 poll_threads(); 5551 5552 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5553 CU_ASSERT(path_id2->last_failed_tsc == 0); 5554 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5555 CU_ASSERT(nvme_ctrlr->resetting == false); 5556 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5557 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5558 5559 spdk_put_io_channel(ch); 5560 5561 poll_threads(); 5562 5563 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5564 CU_ASSERT(rc == 0); 5565 5566 poll_threads(); 5567 spdk_delay_us(1000); 5568 poll_threads(); 5569 5570 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5571 } 5572 5573 static void 5574 test_fail_path(void) 5575 { 5576 struct nvme_path_id path = {}; 5577 struct nvme_ctrlr_opts opts = {}; 5578 struct spdk_nvme_ctrlr *ctrlr; 5579 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5580 struct nvme_ctrlr *nvme_ctrlr; 5581 const int STRING_SIZE = 32; 5582 const char *attached_names[STRING_SIZE]; 5583 struct nvme_bdev *bdev; 5584 struct nvme_ns *nvme_ns; 5585 struct spdk_bdev_io *bdev_io; 5586 struct spdk_io_channel *ch; 5587 struct nvme_bdev_channel *nbdev_ch; 5588 struct nvme_io_path *io_path; 5589 struct nvme_ctrlr_channel *ctrlr_ch; 5590 int rc; 5591 5592 /* The test scenario is the following. 5593 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5594 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated. 5595 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5596 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec 5597 * comes first. The queued I/O is failed. 5598 * - After fast_io_fail_timeout_sec, any I/O is failed immediately. 5599 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5600 */ 5601 5602 g_opts.bdev_retry_count = 1; 5603 5604 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5605 ut_init_trid(&path.trid); 5606 5607 set_thread(0); 5608 5609 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5610 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5611 5612 g_ut_attach_ctrlr_status = 0; 5613 g_ut_attach_bdev_count = 1; 5614 5615 opts.ctrlr_loss_timeout_sec = 4; 5616 opts.reconnect_delay_sec = 1; 5617 opts.fast_io_fail_timeout_sec = 2; 5618 5619 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5620 attach_ctrlr_done, NULL, NULL, &opts, false); 5621 CU_ASSERT(rc == 0); 5622 5623 spdk_delay_us(1000); 5624 poll_threads(); 5625 5626 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5627 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5628 5629 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5630 CU_ASSERT(nvme_ctrlr != NULL); 5631 5632 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5633 CU_ASSERT(bdev != NULL); 5634 5635 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5636 CU_ASSERT(nvme_ns != NULL); 5637 5638 ch = spdk_get_io_channel(bdev); 5639 SPDK_CU_ASSERT_FATAL(ch != NULL); 5640 5641 nbdev_ch = spdk_io_channel_get_ctx(ch); 5642 5643 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5644 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5645 5646 ctrlr_ch = io_path->qpair->ctrlr_ch; 5647 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5648 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5649 5650 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5651 ut_bdev_io_set_buf(bdev_io); 5652 5653 5654 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5655 ctrlr->fail_reset = true; 5656 ctrlr->is_failed = true; 5657 5658 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5659 CU_ASSERT(rc == 0); 5660 CU_ASSERT(nvme_ctrlr->resetting == true); 5661 CU_ASSERT(ctrlr->is_failed == true); 5662 5663 poll_threads(); 5664 5665 CU_ASSERT(nvme_ctrlr->resetting == false); 5666 CU_ASSERT(ctrlr->is_failed == false); 5667 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5668 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5669 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5670 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5671 5672 /* I/O should be queued. */ 5673 bdev_io->internal.in_submit_request = true; 5674 5675 bdev_nvme_submit_request(ch, bdev_io); 5676 5677 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5678 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5679 5680 /* After a second, the I/O should still be queued and the ctrlr should 5681 * still be recovering. 5682 */ 5683 spdk_delay_us(SPDK_SEC_TO_USEC); 5684 poll_threads(); 5685 5686 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5687 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5688 5689 CU_ASSERT(nvme_ctrlr->resetting == false); 5690 CU_ASSERT(ctrlr->is_failed == false); 5691 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5692 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5693 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5694 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5695 5696 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5697 5698 /* After two seconds, fast_io_fail_timeout_sec should expire. */ 5699 spdk_delay_us(SPDK_SEC_TO_USEC); 5700 poll_threads(); 5701 5702 CU_ASSERT(nvme_ctrlr->resetting == false); 5703 CU_ASSERT(ctrlr->is_failed == false); 5704 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5705 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5706 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5707 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5708 5709 /* Then within a second, pending I/O should be failed. */ 5710 spdk_delay_us(SPDK_SEC_TO_USEC); 5711 poll_threads(); 5712 5713 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5714 poll_threads(); 5715 5716 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5717 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5718 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5719 5720 /* Another I/O submission should fail immediately. */ 5721 bdev_io->internal.in_submit_request = true; 5722 5723 bdev_nvme_submit_request(ch, bdev_io); 5724 5725 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5726 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5727 5728 /* After four seconds, ctrlr_loss_timeout_sec should expire and ctrlr should 5729 * be deleted. 5730 */ 5731 spdk_delay_us(SPDK_SEC_TO_USEC); 5732 poll_threads(); 5733 5734 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5735 poll_threads(); 5736 5737 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5738 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5739 CU_ASSERT(nvme_ctrlr->destruct == true); 5740 5741 spdk_put_io_channel(ch); 5742 5743 poll_threads(); 5744 spdk_delay_us(1000); 5745 poll_threads(); 5746 5747 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5748 5749 free(bdev_io); 5750 5751 g_opts.bdev_retry_count = 0; 5752 } 5753 5754 static void 5755 test_nvme_ns_cmp(void) 5756 { 5757 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5758 5759 nvme_ns1.id = 0; 5760 nvme_ns2.id = UINT32_MAX; 5761 5762 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5763 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5764 } 5765 5766 static void 5767 test_ana_transition(void) 5768 { 5769 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5770 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5771 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5772 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5773 5774 /* case 1: the ANA transition timedout flag is cleared. */ 5775 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5776 nvme_ns.ana_transition_timedout = true; 5777 5778 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5779 5780 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5781 5782 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5783 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5784 5785 /* case 2: ANATT timer is kept. */ 5786 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5787 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5788 &nvme_ns, 5789 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5790 5791 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5792 5793 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5794 5795 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5796 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5797 5798 /* case 3: ANATT timer is stopped.
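* Reporting a stable (optimized) state while the ANATT timer armed in case 2 is still running should unregister that poller.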
*/ 5799 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5800 5801 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5802 5803 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5804 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5805 5806 /* ANATT timer is started. */ 5807 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5808 5809 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5810 5811 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5812 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5813 5814 /* ANATT timer is expired. */ 5815 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5816 5817 poll_threads(); 5818 5819 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5820 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5821 } 5822 5823 static void 5824 _set_preferred_path_cb(void *cb_arg, int rc) 5825 { 5826 bool *done = cb_arg; 5827 5828 *done = true; 5829 } 5830 5831 static void 5832 test_set_preferred_path(void) 5833 { 5834 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5835 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5836 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5837 const int STRING_SIZE = 32; 5838 const char *attached_names[STRING_SIZE]; 5839 struct nvme_bdev *bdev; 5840 struct spdk_io_channel *ch; 5841 struct nvme_bdev_channel *nbdev_ch; 5842 struct nvme_io_path *io_path; 5843 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5844 const struct spdk_nvme_ctrlr_data *cdata; 5845 bool done; 5846 int rc; 5847 5848 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5849 ut_init_trid(&path1.trid); 5850 ut_init_trid2(&path2.trid); 5851 ut_init_trid3(&path3.trid); 5852 g_ut_attach_ctrlr_status = 0; 5853 g_ut_attach_bdev_count = 1; 5854 5855 set_thread(0); 5856 5857 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5858 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5859 5860 ctrlr1->ns[0].uuid = &uuid1; 5861 5862 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5863 attach_ctrlr_done, NULL, NULL, NULL, true); 5864 CU_ASSERT(rc == 0); 5865 5866 spdk_delay_us(1000); 5867 poll_threads(); 5868 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5869 poll_threads(); 5870 5871 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5872 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5873 5874 ctrlr2->ns[0].uuid = &uuid1; 5875 5876 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5877 attach_ctrlr_done, NULL, NULL, NULL, true); 5878 CU_ASSERT(rc == 0); 5879 5880 spdk_delay_us(1000); 5881 poll_threads(); 5882 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5883 poll_threads(); 5884 5885 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5886 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5887 5888 ctrlr3->ns[0].uuid = &uuid1; 5889 5890 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5891 attach_ctrlr_done, NULL, NULL, NULL, true); 5892 CU_ASSERT(rc == 0); 5893 5894 spdk_delay_us(1000); 5895 poll_threads(); 5896 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5897 poll_threads(); 5898 5899 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5900 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5901 5902 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5903 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5904 5905 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. 
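* No preferred path has been set yet, so bdev_nvme_find_io_path() should return the path that was registered first.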
*/ 5906 5907 ch = spdk_get_io_channel(bdev); 5908 SPDK_CU_ASSERT_FATAL(ch != NULL); 5909 nbdev_ch = spdk_io_channel_get_ctx(ch); 5910 5911 io_path = bdev_nvme_find_io_path(nbdev_ch); 5912 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5913 5914 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5915 5916 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5917 * should return io_path to ctrlr2. 5918 */ 5919 5920 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5921 done = false; 5922 5923 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5924 5925 poll_threads(); 5926 CU_ASSERT(done == true); 5927 5928 io_path = bdev_nvme_find_io_path(nbdev_ch); 5929 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5930 5931 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5932 5933 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5934 * acquired, find_io_path() should return io_path to ctrlr3. 5935 */ 5936 5937 spdk_put_io_channel(ch); 5938 5939 poll_threads(); 5940 5941 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5942 done = false; 5943 5944 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5945 5946 poll_threads(); 5947 CU_ASSERT(done == true); 5948 5949 ch = spdk_get_io_channel(bdev); 5950 SPDK_CU_ASSERT_FATAL(ch != NULL); 5951 nbdev_ch = spdk_io_channel_get_ctx(ch); 5952 5953 io_path = bdev_nvme_find_io_path(nbdev_ch); 5954 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5955 5956 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 5957 5958 spdk_put_io_channel(ch); 5959 5960 poll_threads(); 5961 5962 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5963 CU_ASSERT(rc == 0); 5964 5965 poll_threads(); 5966 spdk_delay_us(1000); 5967 poll_threads(); 5968 5969 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5970 } 5971 5972 static void 5973 test_find_next_io_path(void) 5974 { 5975 struct nvme_bdev_channel nbdev_ch = { 5976 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 5977 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 5978 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 5979 }; 5980 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 5981 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 5982 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 5983 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 5984 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 5985 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 5986 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 5987 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 5988 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 5989 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 5990 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 5991 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 5992 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 5993 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 5994 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 5995 5996 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 5997 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 5998 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 5999 6000 /* Test the case where nbdev_ch->current_io_path is filled. The case where current_io_path == NULL 6001 * is covered in test_find_io_path. 6002 */ 6003 6004 nbdev_ch.current_io_path = &io_path2; 6005 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6006 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6007 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6008 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6009 6010 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6011 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6012 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6013 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6014 6015 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6016 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6017 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6018 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6019 6020 nbdev_ch.current_io_path = &io_path3; 6021 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6022 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6023 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6024 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6025 6026 /* Test if next io_path is selected according to rr_min_io */ 6027 6028 nbdev_ch.current_io_path = NULL; 6029 nbdev_ch.rr_min_io = 2; 6030 nbdev_ch.rr_counter = 0; 6031 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6032 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6033 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6034 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6035 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6036 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6037 6038 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6039 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6040 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6041 } 6042 6043 static void 6044 test_find_io_path_min_qd(void) 6045 { 6046 struct nvme_bdev_channel nbdev_ch = { 6047 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6048 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6049 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6050 }; 6051 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6052 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6053 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6054 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6055 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6056 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6057 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6058 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6059 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6060 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6061 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6062 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 6063 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6064 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6065 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6066 6067 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6068 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6069 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6070 6071 /* Test if the minimum num_outstanding_reqs or the ANA optimized state is
6072 * prioritized when using least queue depth selector 6073 */ 6074 qpair1.num_outstanding_reqs = 2; 6075 qpair2.num_outstanding_reqs = 1; 6076 qpair3.num_outstanding_reqs = 0; 6077 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6078 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6079 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6080 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6081 6082 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6083 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6084 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6085 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6086 6087 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6088 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6089 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6090 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6091 6092 qpair2.num_outstanding_reqs = 4; 6093 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6094 } 6095 6096 static void 6097 test_disable_auto_failback(void) 6098 { 6099 struct nvme_path_id path1 = {}, path2 = {}; 6100 struct nvme_ctrlr_opts opts = {}; 6101 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6102 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6103 struct nvme_ctrlr *nvme_ctrlr1; 6104 const int STRING_SIZE = 32; 6105 const char *attached_names[STRING_SIZE]; 6106 struct nvme_bdev *bdev; 6107 struct spdk_io_channel *ch; 6108 struct nvme_bdev_channel *nbdev_ch; 6109 struct nvme_io_path *io_path; 6110 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6111 const struct spdk_nvme_ctrlr_data *cdata; 6112 bool done; 6113 int rc; 6114 6115 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6116 ut_init_trid(&path1.trid); 6117 ut_init_trid2(&path2.trid); 6118 g_ut_attach_ctrlr_status = 0; 6119 g_ut_attach_bdev_count = 1; 6120 6121 g_opts.disable_auto_failback = true; 6122 6123 opts.ctrlr_loss_timeout_sec = -1; 6124 opts.reconnect_delay_sec = 1; 6125 6126 set_thread(0); 6127 6128 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6129 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6130 6131 ctrlr1->ns[0].uuid = &uuid1; 6132 6133 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6134 attach_ctrlr_done, NULL, NULL, &opts, true); 6135 CU_ASSERT(rc == 0); 6136 6137 spdk_delay_us(1000); 6138 poll_threads(); 6139 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6140 poll_threads(); 6141 6142 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6143 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6144 6145 ctrlr2->ns[0].uuid = &uuid1; 6146 6147 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6148 attach_ctrlr_done, NULL, NULL, &opts, true); 6149 CU_ASSERT(rc == 0); 6150 6151 spdk_delay_us(1000); 6152 poll_threads(); 6153 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6154 poll_threads(); 6155 6156 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6157 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6158 6159 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6160 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6161 6162 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6163 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6164 6165 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. 
*/ 6166 6167 ch = spdk_get_io_channel(bdev); 6168 SPDK_CU_ASSERT_FATAL(ch != NULL); 6169 nbdev_ch = spdk_io_channel_get_ctx(ch); 6170 6171 io_path = bdev_nvme_find_io_path(nbdev_ch); 6172 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6173 6174 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6175 6176 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6177 ctrlr1->fail_reset = true; 6178 ctrlr1->is_failed = true; 6179 6180 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6181 6182 poll_threads(); 6183 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6184 poll_threads(); 6185 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6186 poll_threads(); 6187 6188 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6189 6190 io_path = bdev_nvme_find_io_path(nbdev_ch); 6191 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6192 6193 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6194 6195 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6196 * Hence, io_path to ctrlr2 should still be used. 6197 */ 6198 ctrlr1->fail_reset = false; 6199 6200 spdk_delay_us(SPDK_SEC_TO_USEC); 6201 poll_threads(); 6202 6203 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6204 6205 io_path = bdev_nvme_find_io_path(nbdev_ch); 6206 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6207 6208 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6209 6210 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6211 * be used again. 6212 */ 6213 6214 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6215 done = false; 6216 6217 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6218 6219 poll_threads(); 6220 CU_ASSERT(done == true); 6221 6222 io_path = bdev_nvme_find_io_path(nbdev_ch); 6223 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6224 6225 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6226 6227 spdk_put_io_channel(ch); 6228 6229 poll_threads(); 6230 6231 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6232 CU_ASSERT(rc == 0); 6233 6234 poll_threads(); 6235 spdk_delay_us(1000); 6236 poll_threads(); 6237 6238 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6239 6240 g_opts.disable_auto_failback = false; 6241 } 6242 6243 static void 6244 ut_set_multipath_policy_done(void *cb_arg, int rc) 6245 { 6246 int *done = cb_arg; 6247 6248 SPDK_CU_ASSERT_FATAL(done != NULL); 6249 *done = rc; 6250 } 6251 6252 static void 6253 test_set_multipath_policy(void) 6254 { 6255 struct nvme_path_id path1 = {}, path2 = {}; 6256 struct nvme_ctrlr_opts opts = {}; 6257 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6258 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6259 const int STRING_SIZE = 32; 6260 const char *attached_names[STRING_SIZE]; 6261 struct nvme_bdev *bdev; 6262 struct spdk_io_channel *ch; 6263 struct nvme_bdev_channel *nbdev_ch; 6264 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6265 int done; 6266 int rc; 6267 6268 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6269 ut_init_trid(&path1.trid); 6270 ut_init_trid2(&path2.trid); 6271 g_ut_attach_ctrlr_status = 0; 6272 g_ut_attach_bdev_count = 1; 6273 6274 g_opts.disable_auto_failback = true; 6275 6276 opts.ctrlr_loss_timeout_sec = -1; 6277 opts.reconnect_delay_sec = 1; 6278 6279 set_thread(0); 6280 6281 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6282 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6283 6284 ctrlr1->ns[0].uuid = &uuid1; 6285 6286 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6287 attach_ctrlr_done, NULL, NULL, &opts, true); 6288 CU_ASSERT(rc == 0); 6289 6290 
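/* As elsewhere in this file, the delay/poll pairs below drive the stubbed attach and admin-queue pollers so that the second ctrlr finishes attaching before the multipath policy is changed. */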
spdk_delay_us(1000); 6291 poll_threads(); 6292 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6293 poll_threads(); 6294 6295 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6296 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6297 6298 ctrlr2->ns[0].uuid = &uuid1; 6299 6300 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6301 attach_ctrlr_done, NULL, NULL, &opts, true); 6302 CU_ASSERT(rc == 0); 6303 6304 spdk_delay_us(1000); 6305 poll_threads(); 6306 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6307 poll_threads(); 6308 6309 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6310 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6311 6312 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6313 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6314 6315 /* If multipath policy is updated before getting any I/O channel, 6316 * a new I/O channel should have the update. 6317 */ 6318 done = -1; 6319 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6320 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6321 ut_set_multipath_policy_done, &done); 6322 poll_threads(); 6323 CU_ASSERT(done == 0); 6324 6325 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6326 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6327 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6328 6329 ch = spdk_get_io_channel(bdev); 6330 SPDK_CU_ASSERT_FATAL(ch != NULL); 6331 nbdev_ch = spdk_io_channel_get_ctx(ch); 6332 6333 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6334 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6335 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6336 6337 /* If multipath policy is updated while an I/O channel is active, 6338 * the update should be applied to the I/O channel immediately. 6339 */ 6340 done = -1; 6341 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6342 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6343 ut_set_multipath_policy_done, &done); 6344 poll_threads(); 6345 CU_ASSERT(done == 0); 6346 6347 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6348 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6349 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6350 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6351 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6352 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6353 6354 spdk_put_io_channel(ch); 6355 6356 poll_threads(); 6357 6358 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6359 CU_ASSERT(rc == 0); 6360 6361 poll_threads(); 6362 spdk_delay_us(1000); 6363 poll_threads(); 6364 6365 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6366 } 6367 6368 static void 6369 test_uuid_generation(void) 6370 { 6371 uint32_t nsid1 = 1, nsid2 = 2; 6372 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6373 char sn3[21] = "                    "; 6374 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6375 struct spdk_uuid uuid1, uuid2; 6376 6377 /* Test case 1: 6378 * Serial numbers are the same, nsids are different. 6379 * Compare two generated UUIDs - they should be different. */ 6380 uuid1 = nvme_generate_uuid(sn1, nsid1); 6381 uuid2 = nvme_generate_uuid(sn1, nsid2); 6382 6383 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6384 6385 /* Test case 2: 6386 * Serial numbers differ only by one character, nsids are the same. 6387 * Compare two generated UUIDs - they should be different.
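* nvme_generate_uuid() derives the UUID deterministically from the serial number and nsid, so changing either input should change the result.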
*/ 6388 uuid1 = nvme_generate_uuid(sn1, nsid1); 6389 uuid2 = nvme_generate_uuid(sn2, nsid1); 6390 6391 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6392 6393 /* Test case 3: 6394 * Serial number consists only of space characters. 6395 * Validate the generated UUID. */ 6396 uuid1 = nvme_generate_uuid(sn3, nsid1); 6397 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6398 } 6399 6400 static void 6401 test_retry_io_to_same_path(void) 6402 { 6403 struct nvme_path_id path1 = {}, path2 = {}; 6404 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6405 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6406 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6407 const int STRING_SIZE = 32; 6408 const char *attached_names[STRING_SIZE]; 6409 struct nvme_bdev *bdev; 6410 struct spdk_bdev_io *bdev_io; 6411 struct nvme_bdev_io *bio; 6412 struct spdk_io_channel *ch; 6413 struct nvme_bdev_channel *nbdev_ch; 6414 struct nvme_io_path *io_path1, *io_path2; 6415 struct ut_nvme_req *req; 6416 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6417 int done; 6418 int rc; 6419 6420 g_opts.nvme_ioq_poll_period_us = 1; 6421 6422 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6423 ut_init_trid(&path1.trid); 6424 ut_init_trid2(&path2.trid); 6425 g_ut_attach_ctrlr_status = 0; 6426 g_ut_attach_bdev_count = 1; 6427 6428 set_thread(0); 6429 6430 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6431 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6432 6433 ctrlr1->ns[0].uuid = &uuid1; 6434 6435 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6436 attach_ctrlr_done, NULL, NULL, NULL, true); 6437 CU_ASSERT(rc == 0); 6438 6439 spdk_delay_us(1000); 6440 poll_threads(); 6441 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6442 poll_threads(); 6443 6444 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6445 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6446 6447 ctrlr2->ns[0].uuid = &uuid1; 6448 6449 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6450 attach_ctrlr_done, NULL, NULL, NULL, true); 6451 CU_ASSERT(rc == 0); 6452 6453 spdk_delay_us(1000); 6454 poll_threads(); 6455 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6456 poll_threads(); 6457 6458 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6459 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6460 6461 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6462 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6463 6464 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 6465 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6466 6467 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6468 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6469 6470 done = -1; 6471 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6472 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6473 poll_threads(); 6474 CU_ASSERT(done == 0); 6475 6476 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6477 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6478 CU_ASSERT(bdev->rr_min_io == 1); 6479 6480 ch = spdk_get_io_channel(bdev); 6481 SPDK_CU_ASSERT_FATAL(ch != NULL); 6482 nbdev_ch = spdk_io_channel_get_ctx(ch); 6483 6484 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6485 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6486 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6487 6488 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6489 ut_bdev_io_set_buf(bdev_io); 6490 6491 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6492 6493 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6494 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6495 6496 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6497 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6498 6499 /* The 1st I/O should be submitted to io_path1. */ 6500 bdev_io->internal.in_submit_request = true; 6501 6502 bdev_nvme_submit_request(ch, bdev_io); 6503 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6504 CU_ASSERT(bio->io_path == io_path1); 6505 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6506 6507 spdk_delay_us(1); 6508 6509 poll_threads(); 6510 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6511 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6512 6513 /* The 2nd I/O should be submitted to io_path2 because the path selection 6514 * policy is round-robin. 6515 */ 6516 bdev_io->internal.in_submit_request = true; 6517 6518 bdev_nvme_submit_request(ch, bdev_io); 6519 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6520 CU_ASSERT(bio->io_path == io_path2); 6521 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6522 6523 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6524 SPDK_CU_ASSERT_FATAL(req != NULL); 6525 6526 /* Set retry count to non-zero. */ 6527 g_opts.bdev_retry_count = 2; 6528 6529 /* Inject an I/O error. */ 6530 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6531 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6532 6533 /* The 2nd I/O should be queued to nbdev_ch. */ 6534 spdk_delay_us(1); 6535 poll_thread_times(0, 1); 6536 6537 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6538 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6539 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6540 6541 /* The 2nd I/O should keep caching io_path2. */ 6542 CU_ASSERT(bio->io_path == io_path2); 6543 6544 /* The 2nd I/O should be submitted to io_path2 again. */ 6545 poll_thread_times(0, 1); 6546 6547 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6548 CU_ASSERT(bio->io_path == io_path2); 6549 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6550 6551 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6552 SPDK_CU_ASSERT_FATAL(req != NULL); 6553 6554 /* Inject an I/O error again. */ 6555 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6556 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6557 req->cpl.status.crd = 1; 6558 6559 ctrlr2->cdata.crdt[1] = 1; 6560 6561 /* The 2nd I/O should be queued to nbdev_ch. */ 6562 spdk_delay_us(1); 6563 poll_thread_times(0, 1); 6564 6565 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6566 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6567 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6568 6569 /* The 2nd I/O should keep caching io_path2. */ 6570 CU_ASSERT(bio->io_path == io_path2); 6571 6572 /* Detach ctrlr2 dynamically. */ 6573 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6574 CU_ASSERT(rc == 0); 6575 6576 spdk_delay_us(1000); 6577 poll_threads(); 6578 spdk_delay_us(1000); 6579 poll_threads(); 6580 spdk_delay_us(1000); 6581 poll_threads(); 6582 spdk_delay_us(1000); 6583 poll_threads(); 6584 6585 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 6586 6587 poll_threads(); 6588 spdk_delay_us(100000); 6589 poll_threads(); 6590 spdk_delay_us(1); 6591 poll_threads(); 6592 6593 /* The 2nd I/O should succeed via io_path1. */ 6594 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6595 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6596 CU_ASSERT(bio->io_path == io_path1); 6597 6598 free(bdev_io); 6599 6600 spdk_put_io_channel(ch); 6601 6602 poll_threads(); 6603 spdk_delay_us(1); 6604 poll_threads(); 6605 6606 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6607 CU_ASSERT(rc == 0); 6608 6609 poll_threads(); 6610 spdk_delay_us(1000); 6611 poll_threads(); 6612 6613 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6614 6615 g_opts.nvme_ioq_poll_period_us = 0; 6616 g_opts.bdev_retry_count = 0; 6617 } 6618 6619 /* This case verifies a fix for a complex race condition in which 6620 * failover is lost if the fabric connect command times out while the 6621 * controller is being reset. 6622 */ 6623 static void 6624 test_race_between_reset_and_disconnected(void) 6625 { 6626 struct spdk_nvme_transport_id trid = {}; 6627 struct spdk_nvme_ctrlr ctrlr = {}; 6628 struct nvme_ctrlr *nvme_ctrlr = NULL; 6629 struct nvme_path_id *curr_trid; 6630 struct spdk_io_channel *ch1, *ch2; 6631 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6632 int rc; 6633 6634 ut_init_trid(&trid); 6635 TAILQ_INIT(&ctrlr.active_io_qpairs); 6636 6637 set_thread(0); 6638 6639 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6640 CU_ASSERT(rc == 0); 6641 6642 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6643 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6644 6645 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6646 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6647 6648 ch1 = spdk_get_io_channel(nvme_ctrlr); 6649 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6650 6651 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6652 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6653 6654 set_thread(1); 6655 6656 ch2 = spdk_get_io_channel(nvme_ctrlr); 6657 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6658 6659 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6660 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6661 6662 /* Reset starts from thread 1. */ 6663 set_thread(1); 6664 6665 nvme_ctrlr->resetting = false; 6666 curr_trid->last_failed_tsc = spdk_get_ticks(); 6667 ctrlr.is_failed = true; 6668 6669 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6670 CU_ASSERT(rc == 0); 6671 CU_ASSERT(nvme_ctrlr->resetting == true); 6672 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6673 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6674 6675 poll_thread_times(0, 3); 6676 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6677 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6678 6679 poll_thread_times(0, 1); 6680 poll_thread_times(1, 1); 6681 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6682 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6683 CU_ASSERT(ctrlr.is_failed == true); 6684 6685 poll_thread_times(1, 1); 6686 poll_thread_times(0, 1); 6687 CU_ASSERT(ctrlr.is_failed == false); 6688 CU_ASSERT(ctrlr.adminq.is_connected == false); 6689 6690 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6691 poll_thread_times(0, 2); 6692 CU_ASSERT(ctrlr.adminq.is_connected == true); 6693 6694 poll_thread_times(0, 1); 6695 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6696 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6697 6698 poll_thread_times(1, 1); 6699 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6700 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6701 CU_ASSERT(nvme_ctrlr->resetting == true); 6702 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6703 6704 poll_thread_times(0, 2); 6705 CU_ASSERT(nvme_ctrlr->resetting == true); 6706 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6707 poll_thread_times(1, 1); 6708 CU_ASSERT(nvme_ctrlr->resetting == true); 6709 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6710 6711 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6712 * 6713 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric 6714 * connect command is executed. If the fabric connect command times out, 6715 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6716 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6717 * 6718 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
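* The call should return -EINPROGRESS and merely set pending_failover; the actual failover must wait until the in-flight reset completes.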
6719 */ 6720 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6721 CU_ASSERT(rc == -EINPROGRESS); 6722 CU_ASSERT(nvme_ctrlr->resetting == true); 6723 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6724 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6725 6726 poll_thread_times(0, 1); 6727 6728 CU_ASSERT(nvme_ctrlr->resetting == true); 6729 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6730 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6731 6732 poll_threads(); 6733 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6734 poll_threads(); 6735 6736 CU_ASSERT(nvme_ctrlr->resetting == false); 6737 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6738 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6739 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6740 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6741 6742 spdk_put_io_channel(ch2); 6743 6744 set_thread(0); 6745 6746 spdk_put_io_channel(ch1); 6747 6748 poll_threads(); 6749 6750 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6751 CU_ASSERT(rc == 0); 6752 6753 poll_threads(); 6754 spdk_delay_us(1000); 6755 poll_threads(); 6756 6757 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6758 } 6759 static void 6760 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6761 { 6762 int *_rc = (int *)cb_arg; 6763 6764 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6765 *_rc = rc; 6766 } 6767 6768 static void 6769 test_ctrlr_op_rpc(void) 6770 { 6771 struct spdk_nvme_transport_id trid = {}; 6772 struct spdk_nvme_ctrlr ctrlr = {}; 6773 struct nvme_ctrlr *nvme_ctrlr = NULL; 6774 struct nvme_path_id *curr_trid; 6775 struct spdk_io_channel *ch1, *ch2; 6776 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6777 int ctrlr_op_rc; 6778 int rc; 6779 6780 ut_init_trid(&trid); 6781 TAILQ_INIT(&ctrlr.active_io_qpairs); 6782 6783 set_thread(0); 6784 6785 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6786 CU_ASSERT(rc == 0); 6787 6788 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6789 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6790 6791 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6792 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6793 6794 ch1 = spdk_get_io_channel(nvme_ctrlr); 6795 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6796 6797 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6798 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6799 6800 set_thread(1); 6801 6802 ch2 = spdk_get_io_channel(nvme_ctrlr); 6803 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6804 6805 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6806 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6807 6808 /* Reset starts from thread 1. */ 6809 set_thread(1); 6810 6811 /* Case 1: ctrlr is already being destructed. */ 6812 nvme_ctrlr->destruct = true; 6813 ctrlr_op_rc = 0; 6814 6815 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6816 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6817 6818 poll_threads(); 6819 6820 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6821 6822 /* Case 2: reset is in progress. */ 6823 nvme_ctrlr->destruct = false; 6824 nvme_ctrlr->resetting = true; 6825 ctrlr_op_rc = 0; 6826 6827 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6828 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6829 6830 poll_threads(); 6831 6832 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6833 6834 /* Case 3: reset completes successfully. 
*/ 6835 nvme_ctrlr->resetting = false; 6836 curr_trid->last_failed_tsc = spdk_get_ticks(); 6837 ctrlr.is_failed = true; 6838 ctrlr_op_rc = -1; 6839 6840 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6841 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6842 6843 CU_ASSERT(nvme_ctrlr->resetting == true); 6844 CU_ASSERT(ctrlr_op_rc == -1); 6845 6846 poll_threads(); 6847 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6848 poll_threads(); 6849 6850 CU_ASSERT(nvme_ctrlr->resetting == false); 6851 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6852 CU_ASSERT(ctrlr.is_failed == false); 6853 CU_ASSERT(ctrlr_op_rc == 0); 6854 6855 /* Case 4: invalid operation. */ 6856 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6857 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6858 6859 poll_threads(); 6860 6861 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6862 6863 spdk_put_io_channel(ch2); 6864 6865 set_thread(0); 6866 6867 spdk_put_io_channel(ch1); 6868 6869 poll_threads(); 6870 6871 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6872 CU_ASSERT(rc == 0); 6873 6874 poll_threads(); 6875 spdk_delay_us(1000); 6876 poll_threads(); 6877 6878 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6879 } 6880 6881 static void 6882 test_bdev_ctrlr_op_rpc(void) 6883 { 6884 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6885 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6886 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6887 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6888 struct nvme_path_id *curr_trid1, *curr_trid2; 6889 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6890 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6891 int ctrlr_op_rc; 6892 int rc; 6893 6894 ut_init_trid(&trid1); 6895 ut_init_trid2(&trid2); 6896 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6897 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6898 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6899 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6900 ctrlr1.cdata.cntlid = 1; 6901 ctrlr2.cdata.cntlid = 2; 6902 ctrlr1.adminq.is_connected = true; 6903 ctrlr2.adminq.is_connected = true; 6904 6905 set_thread(0); 6906 6907 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6908 CU_ASSERT(rc == 0); 6909 6910 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6911 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6912 6913 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1); 6914 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6915 6916 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6917 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6918 6919 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6920 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6921 6922 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6923 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6924 6925 set_thread(1); 6926 6927 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6928 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6929 6930 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6931 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6932 6933 set_thread(0); 6934 6935 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6936 CU_ASSERT(rc == 0); 6937 6938 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2); 6939 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6940 6941 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 6942 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 6943 6944 ch21 = spdk_get_io_channel(nvme_ctrlr2); 6945 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 6946 6947 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 6948 CU_ASSERT(ctrlr_ch21->qpair != NULL); 6949 6950 set_thread(1); 6951 6952 ch22 = spdk_get_io_channel(nvme_ctrlr2); 6953 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 6954 6955 ctrlr_ch22 
= spdk_io_channel_get_ctx(ch22); 6956 CU_ASSERT(ctrlr_ch22->qpair != NULL); 6957 6958 /* Reset starts from thread 1. */ 6959 set_thread(1); 6960 6961 nvme_ctrlr1->resetting = false; 6962 nvme_ctrlr2->resetting = false; 6963 curr_trid1->last_failed_tsc = spdk_get_ticks(); 6964 curr_trid2->last_failed_tsc = spdk_get_ticks(); 6965 ctrlr_op_rc = -1; 6966 6967 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 6968 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6969 6970 CU_ASSERT(nvme_ctrlr1->resetting == true); 6971 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6972 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6973 CU_ASSERT(nvme_ctrlr2->resetting == false); 6974 6975 poll_thread_times(0, 3); 6976 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6977 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 6978 6979 poll_thread_times(0, 1); 6980 poll_thread_times(1, 1); 6981 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 6982 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 6983 6984 poll_thread_times(1, 1); 6985 poll_thread_times(0, 1); 6986 CU_ASSERT(ctrlr1.adminq.is_connected == false); 6987 6988 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6989 poll_thread_times(0, 2); 6990 CU_ASSERT(ctrlr1.adminq.is_connected == true); 6991 6992 poll_thread_times(0, 1); 6993 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 6994 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 6995 6996 poll_thread_times(1, 1); 6997 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 6998 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 6999 CU_ASSERT(nvme_ctrlr1->resetting == true); 7000 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7001 7002 poll_thread_times(0, 2); 7003 poll_thread_times(1, 1); 7004 poll_thread_times(0, 1); 7005 poll_thread_times(1, 1); 7006 poll_thread_times(0, 1); 7007 poll_thread_times(1, 1); 7008 poll_thread_times(0, 1); 7009 7010 CU_ASSERT(nvme_ctrlr1->resetting == false); 7011 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7012 CU_ASSERT(nvme_ctrlr2->resetting == true); 7013 7014 poll_threads(); 7015 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7016 poll_threads(); 7017 7018 CU_ASSERT(nvme_ctrlr2->resetting == false); 7019 CU_ASSERT(ctrlr_op_rc == 0); 7020 7021 set_thread(1); 7022 7023 spdk_put_io_channel(ch12); 7024 spdk_put_io_channel(ch22); 7025 7026 set_thread(0); 7027 7028 spdk_put_io_channel(ch11); 7029 spdk_put_io_channel(ch21); 7030 7031 poll_threads(); 7032 7033 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7034 CU_ASSERT(rc == 0); 7035 7036 poll_threads(); 7037 spdk_delay_us(1000); 7038 poll_threads(); 7039 7040 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7041 } 7042 7043 static void 7044 test_disable_enable_ctrlr(void) 7045 { 7046 struct spdk_nvme_transport_id trid = {}; 7047 struct spdk_nvme_ctrlr ctrlr = {}; 7048 struct nvme_ctrlr *nvme_ctrlr = NULL; 7049 struct nvme_path_id *curr_trid; 7050 struct spdk_io_channel *ch1, *ch2; 7051 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7052 int rc; 7053 7054 ut_init_trid(&trid); 7055 TAILQ_INIT(&ctrlr.active_io_qpairs); 7056 ctrlr.adminq.is_connected = true; 7057 7058 set_thread(0); 7059 7060 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7061 CU_ASSERT(rc == 0); 7062 7063 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7064 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7065 7066 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7067 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7068 7069 ch1 = spdk_get_io_channel(nvme_ctrlr); 7070 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7071 7072 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7073 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7074 7075 
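/* Get a second channel on thread 1 so that disabling the ctrlr has to fan out across multiple threads, as the per-thread polls below exercise. */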
set_thread(1); 7076 7077 ch2 = spdk_get_io_channel(nvme_ctrlr); 7078 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7079 7080 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7081 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7082 7083 /* Disable starts from thread 1. */ 7084 set_thread(1); 7085 7086 /* Case 1: ctrlr is already disabled. */ 7087 nvme_ctrlr->disabled = true; 7088 7089 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7090 CU_ASSERT(rc == -EALREADY); 7091 7092 /* Case 2: ctrlr is already being destructed. */ 7093 nvme_ctrlr->disabled = false; 7094 nvme_ctrlr->destruct = true; 7095 7096 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7097 CU_ASSERT(rc == -ENXIO); 7098 7099 /* Case 3: reset is in progress. */ 7100 nvme_ctrlr->destruct = false; 7101 nvme_ctrlr->resetting = true; 7102 7103 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7104 CU_ASSERT(rc == -EBUSY); 7105 7106 /* Case 4: disable completes successfully. */ 7107 nvme_ctrlr->resetting = false; 7108 7109 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7110 CU_ASSERT(rc == 0); 7111 CU_ASSERT(nvme_ctrlr->resetting == true); 7112 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7113 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7114 7115 poll_thread_times(0, 3); 7116 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7117 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7118 7119 poll_thread_times(0, 1); 7120 poll_thread_times(1, 1); 7121 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7122 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7123 7124 poll_thread_times(1, 1); 7125 poll_thread_times(0, 1); 7126 CU_ASSERT(ctrlr.adminq.is_connected == false); 7127 poll_thread_times(1, 1); 7128 poll_thread_times(0, 1); 7129 poll_thread_times(1, 1); 7130 poll_thread_times(0, 1); 7131 CU_ASSERT(nvme_ctrlr->resetting == false); 7132 CU_ASSERT(nvme_ctrlr->disabled == true); 7133 7134 /* Case 5: enable completes successfully. */ 7135 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7136 CU_ASSERT(rc == 0); 7137 7138 CU_ASSERT(nvme_ctrlr->resetting == true); 7139 CU_ASSERT(nvme_ctrlr->disabled == false); 7140 7141 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7142 poll_thread_times(0, 2); 7143 CU_ASSERT(ctrlr.adminq.is_connected == true); 7144 7145 poll_thread_times(0, 1); 7146 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7147 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7148 7149 poll_thread_times(1, 1); 7150 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7151 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7152 CU_ASSERT(nvme_ctrlr->resetting == true); 7153 7154 poll_thread_times(0, 2); 7155 CU_ASSERT(nvme_ctrlr->resetting == true); 7156 poll_thread_times(1, 1); 7157 CU_ASSERT(nvme_ctrlr->resetting == true); 7158 poll_thread_times(0, 1); 7159 CU_ASSERT(nvme_ctrlr->resetting == false); 7160 7161 /* Case 6: ctrlr is already enabled. */ 7162 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7163 CU_ASSERT(rc == -EALREADY); 7164 7165 set_thread(0); 7166 7167 /* Case 7: disable cancels delayed reconnect. 
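* A failed reset arms reconnect_delay_timer. Disabling the ctrlr while that timer is pending should unregister it instead of waiting for it to fire.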

static void
ut_delete_done(void *ctx, int rc)
{
	int *delete_done_rc = ctx;
	*delete_done_rc = rc;
}

static void
test_delete_ctrlr_done(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int delete_done_rc = 0xDEADBEEF;
	int rc;

	ut_init_trid(&trid);

	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
	CU_ASSERT(rc == 0);

	for (int i = 0; i < 20; i++) {
		poll_threads();
		if (delete_done_rc == 0) {
			break;
		}
		spdk_delay_us(1000);
	}

	CU_ASSERT(delete_done_rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}
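
/*
 * test_delete_ctrlr_done() above shows the asynchronous completion pattern
 * used with bdev_nvme_delete(): prime a context word with a sentinel, pass a
 * callback that overwrites it, then poll with a bounded iteration count. A
 * sketch of the same idiom as a reusable helper (ut_poll_for_completion() is
 * a hypothetical name, not part of this suite):
 */
static inline bool
ut_poll_for_completion(int *done_rc, int sentinel, int max_iters)
{
	/* Pump all reactors until the callback rewrites *done_rc. */
	for (int i = 0; i < max_iters; i++) {
		poll_threads();
		if (*done_rc != sentinel) {
			return true;
		}
		/* Let timer-driven paths (e.g. delayed retries) make progress. */
		spdk_delay_us(1000);
	}

	return false;
}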

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
	CU_ADD_TEST(suite, test_admin_path);
	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
	CU_ADD_TEST(suite, test_find_io_path);
	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
	CU_ADD_TEST(suite, test_retry_io_count);
	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
	CU_ADD_TEST(suite, test_reconnect_ctrlr);
	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
	CU_ADD_TEST(suite, test_fail_path);
	CU_ADD_TEST(suite, test_nvme_ns_cmp);
	CU_ADD_TEST(suite, test_ana_transition);
	CU_ADD_TEST(suite, test_set_preferred_path);
	CU_ADD_TEST(suite, test_find_next_io_path);
	CU_ADD_TEST(suite, test_find_io_path_min_qd);
	CU_ADD_TEST(suite, test_disable_auto_failback);
	CU_ADD_TEST(suite, test_set_multipath_policy);
	CU_ADD_TEST(suite, test_uuid_generation);
	CU_ADD_TEST(suite, test_retry_io_to_same_path);
	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
	CU_ADD_TEST(suite, test_delete_ctrlr_done);

	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	num_failures = spdk_ut_run_tests(argc, argv, NULL);

	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	CU_cleanup_registry();

	return num_failures;
}