/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);

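/* ut_spdk_nvme_ctrlr_get_memory_domains is declared by DEFINE_RETURN_MOCK() above.
 * The function below fills "domains" with dummy pointers before applying the mocked
 * return value, so tests can exercise both the fill path and the returned count.
 */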
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

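/* All controllers attached by these tests share one fixed host NQN; the
 * spdk_nvme_ctrlr_get_opts() stub below returns it via g_ut_ctrlr_opts.
 */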
#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

struct ut_nvme_req {
	uint16_t opc;
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	struct spdk_nvme_cpl cpl;
	TAILQ_ENTRY(ut_nvme_req) tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t id;
	bool is_active;
	struct spdk_uuid *uuid;
	enum spdk_nvme_ana_state ana_state;
	enum spdk_nvme_csi csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr *ctrlr;
	uint8_t failure_reason;
	bool is_connected;
	bool in_completion_context;
	bool delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req) outstanding_reqs;
	uint32_t num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq;
	struct spdk_nvme_poll_group *poll_group;
	void *poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t num_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_qpair adminq;
	struct spdk_nvme_ctrlr_data cdata;
	bool attached;
	bool is_failed;
	bool fail_reset;
	bool is_removed;
	struct spdk_nvme_transport_id trid;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
	struct spdk_nvme_ctrlr_opts opts;
};

struct spdk_nvme_poll_group {
	void *ctx;
	struct spdk_nvme_accel_fn_table accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;
	void *cb_ctx;
	spdk_nvme_attach_cb attach_cb;
	struct spdk_nvme_ctrlr *init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

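/* Global test state: controllers queued for probe, controllers currently
 * attached, and knobs that control the mocked attach/register results.
 */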
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

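/* Fabricate a controller with num_ns namespaces and queue it on g_ut_init_ctrlrs;
 * it becomes visible to the bdev module once spdk_nvme_probe_poll_async() picks it
 * up. Returns NULL if a controller with the same trid is already queued.
 */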
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

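/* Mocked async probe flow: spdk_nvme_probe_poll_async() moves matching
 * controllers from g_ut_init_ctrlrs to g_ut_attached_ctrlrs and invokes the
 * attach callback, mimicking the real asynchronous connect sequence.
 */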
static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

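/* A qpair lives on exactly one of its poll group's two lists; these helpers
 * move it between them and keep poll_group_tailq_head pointing at the list
 * the qpair is currently on, which the asserts below rely on.
 */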
static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

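/* Build a minimal ANA log page: a header followed by one descriptor per
 * active namespace, where each descriptor carries exactly one NSID (hence
 * UT_ANA_DESC_SIZE is the descriptor struct plus a single uint32_t).
 */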
#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) + \
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

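/* The remaining spdk_nvme_ns_cmd_* mocks below likewise queue a ut_nvme_req
 * carrying the matching NVMe opcode; nothing completes until a test polls the
 * qpair. The g_ut_*_ext_called flags above let tests verify which API variant
 * the bdev module took.
 */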
int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

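/* Completing I/O is explicit in this mock: processing a qpair drains every
 * queued request, invokes its callback with the stored completion, and honors
 * a deferred free that was requested while in the completion context.
 */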
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

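/* Completion status is recorded directly on the spdk_bdev_io; the helper
 * below maps NVMe (sct, sc) onto the bdev statuses that the tests assert:
 * success, aborted-by-request, or a generic NVMe error.
 */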
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

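/* Walk a full controller reset across two threads step by step: both I/O
 * qpairs are torn down, the admin qpair reconnects, the qpairs are recreated,
 * and last_failed_tsc is cleared before resetting completes. Case 4 covers a
 * reset that turns into hot removal.
 */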
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

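/* A delete requested while a reset is in flight must be deferred: the ctrlr
 * stays registered with destruct pending until the reset finishes, and any
 * new reset request is rejected with -ENXIO in the meantime.
 */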
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling called spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

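/* Failover with a single trid behaves like a plain reset; with two trids, a
 * successful failover promotes the secondary path to the head of the trid
 * list and makes it the active path.
 */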
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test the one-trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test the two-trid case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was broken and resetting the ctrlr failed repeatedly before failover from
 * trid1 to trid2 started. While the failed reset was being processed, trid3 was
 * added. trid1 should have stayed active, i.e., at the head of the list, until the
 * failover completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection
 * is broken, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error invokes reset ctrlr and an admin qpair error invokes failover ctrlr.
 * Hence reset ctrlr may be executed repeatedly before failover is executed, so this
 * bug is real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

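/* Two reset requests race on two threads: the second arrives while the first
 * is in progress, is parked on that channel's pending_resets list, and ends
 * up with the same status as the first reset (success in the first scenario,
 * failure in the second).
 */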
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while the first request is being processed.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while the first request is being processed.
	 *
	 * The difference from the above scenario is that the reset of the controller
	 * fails while processing the first request. Hence both reset requests should fail.
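	 * (The second request waits on ctrlr_ch1->pending_resets; when the first
	 * reset fails, the queued request is completed with failure as well, as the
	 * assertions below check.)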
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If the ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If the ctrlr has no namespace, one nvme_ctrlr with no namespace is created. */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If the ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
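	 * The nvme_bdev is named after the ctrlr name and the namespace ID, which
	 * the check of attached_names[0] against "nvme0n1" below relies on.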
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* The ctrlr has one namespace, but an nvme_ctrlr with no namespace is
	 * created because creating the nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_ut_register_bdev_status = 0;
}

static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and
	 * 4th namespaces are populated.
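	 * (ns[0].is_active is cleared below so that namespace 1 starts out
	 * inactive; it is added back later via an AER to exercise dynamic
	 * namespace population.)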
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate the 1st namespace, depopulate the 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	/* Change the ANA states of the active namespaces. */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);
	qpair = io_path->qpair->qpair;
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

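	/* Polling the threads completes the outstanding request. */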
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);
	qpair = io_path->qpair->qpair;
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);
	qpair = io_path->qpair->qpair;
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	/* Only compare-and-write is tested for now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* The first outstanding request is the compare operation.
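	 * Setting its opcode in cdw0 lets the completion callback tell the compare
	 * half of the fused command apart from the write half.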
	 */
	req = TAILQ_FIRST(&qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}

static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	/* Verify that the ext NVMe API is called when data is described by a memory domain. */
	g_ut_read_ext_called = false;
	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	CU_ASSERT(g_ut_read_ext_called == true);
	g_ut_read_ext_called = false;
	bdev_io->u.bdev.memory_domain = NULL;

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_add_remove_trid(void)
{
	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_path_id *ctrid;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	ut_init_trid3(&path3.trid);

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and is simply removed. */
	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
	}

	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* Mark path3 as failed by forcefully setting its last_failed_tsc to non-zero.
	 * If we add path2 again, path2 should be inserted between path1 and path3.
	 * Then we remove path2. It is not used, and is simply removed.
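	 * (A new trid is inserted just before the first trid whose last_failed_tsc
	 * is non-zero, so paths that have not failed are tried first.)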
	 */
	ctrid->last_failed_tsc = spdk_get_ticks() + 1;

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);

	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);

	ctrid = TAILQ_NEXT(ctrid, link);
	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);

	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
	}

	/* path1 is currently used and path3 is an alternative path.
	 * If we remove path1, the active path is changed to path3.
	 */
	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* path3 is the current and only path. If we remove path3, the corresponding
	 * nvme_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* If no trid is specified, the nvme_ctrlr itself is removed.
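	 * (g_any_path carries an empty trid here, which is treated as matching
	 * every registered path.)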
	 */
	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_abort(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1;
	struct nvme_io_path *io_path1;
	struct nvme_qpair *nvme_qpair1;
	int rc;

	/* Create a ctrlr on thread 1 and submit the I/O and admin requests to be
	 * aborted on thread 0. Abort requests for I/O are submitted on thread 0, and
	 * abort requests for admin commands are submitted on thread 1. Both should
	 * succeed.
	 */

	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(write_io);

	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(fuse_io);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	nvme_qpair1 = io_path1->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	/* Aborting an already completed request should fail.
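	 * (The write below is completed by poll_threads() before the abort is
	 * submitted, so the abort finds no outstanding request to act on.)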
	 */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);

	/* Aborting the fuse request should succeed. */
	fuse_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, fuse_io);

	CU_ASSERT(fuse_io->internal.in_submit_request == true);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);

	abort_io->u.abort.bio_to_abort = fuse_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(fuse_io->internal.in_submit_request == false);
	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed.
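	 * (As set up above, the abort for an admin request is submitted on
	 * thread 1 through ch2.)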
	 */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	set_thread(0);

	/* If the qpair is disconnected, it is freed and then reconnected via resetting
	 * the corresponding nvme_ctrlr. An I/O should be queued if it is submitted
	 * while the nvme_ctrlr is being reset.
	 */
	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	poll_thread_times(0, 3);

	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	write_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list));

	/* Aborting the queued write request should succeed immediately. */
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	free(write_io);
	free(fuse_io);
	free(admin_io);
	free(abort_io);

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_get_io_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_qpair *qpair;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	ctrlr_ch = spdk_io_channel_get_ctx(ch);
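	/* The ctrlr channel is expected to hold a connected I/O qpair. */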
	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);

	qpair = bdev_nvme_get_io_qpair(ch);
	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);

	spdk_put_io_channel(ch);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* Test a scenario in which the bdev subsystem starts shutdown while NVMe bdevs
 * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a
 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
 * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here.
 */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	nvme_ctrlr->destruct = true;
	_nvme_ctrlr_destruct(nvme_ctrlr);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_compare_ns(void)
{
	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };

	/* No IDs are defined. */
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only EUI64s are defined and they do not match. */
	nsdata1.eui64 = 0xABCDEF0123456789;
	nsdata2.eui64 = 0xBBCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only EUI64s are defined and they match. */
	nsdata2.eui64 = 0xABCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only NGUIDs are defined and they do not match.
	 */
	nsdata1.eui64 = 0x0;
	nsdata2.eui64 = 0x0;
	nsdata1.nguid[0] = 0x12;
	nsdata2.nguid[0] = 0x10;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only NGUIDs are defined and they match. */
	nsdata2.nguid[0] = 0x12;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only UUIDs are defined and they do not match. */
	nsdata1.nguid[0] = 0x0;
	nsdata2.nguid[0] = 0x0;
	ns1.uuid = &uuid1;
	ns2.uuid = &uuid2;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only one UUID is defined. */
	ns1.uuid = NULL;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only UUIDs are defined and they match. */
	ns1.uuid = &uuid2;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* All of EUI64, NGUID, and UUID are defined and match. */
	nsdata1.eui64 = 0x123456789ABCDEF;
	nsdata2.eui64 = 0x123456789ABCDEF;
	nsdata1.nguid[15] = 0x34;
	nsdata2.nguid[15] = 0x34;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* CSIs do not match. */
	ns1.csi = SPDK_NVME_CSI_ZNS;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
}

static void
test_init_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 5;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_get_memory_domains(void)
{
	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
	struct spdk_memory_domain *domains[4] = {};
	int rc = 0;

	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);

	/* The nvme controller doesn't have memory domains. */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(domains[0] == NULL);
	CU_ASSERT(domains[1] == NULL);

	/* The nvme controller has a memory domain. */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] != NULL);
	memset(domains, 0, sizeof(domains));

	/* Multipath: 2 controllers report 1 memory domain each. */
	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);

	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
	CU_ASSERT(rc == 2);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] != NULL);
	memset(domains, 0, sizeof(domains));

	/* Multipath: 2 controllers report 1 memory domain each, NULL domains ptr. */
	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
	CU_ASSERT(rc == 2);

	/* Multipath: 2 controllers report 1 memory domain each, array_size = 0. */
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
	CU_ASSERT(rc == 2);
	CU_ASSERT(domains[0] == NULL);
	CU_ASSERT(domains[1] == NULL);

	/* Multipath: 2 controllers report 1 memory domain each, array_size = 1. */
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
	CU_ASSERT(rc == 2);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] == NULL);
	memset(domains, 0, sizeof(domains));

	/* Multipath: 2 controllers report 2 memory domains each (not possible, just for test). */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
	CU_ASSERT(rc == 4);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] != NULL);
	CU_ASSERT(domains[2] != NULL);
	CU_ASSERT(domains[3] != NULL);
	memset(domains, 0, sizeof(domains));

	/* Multipath: 2 controllers report 2 memory domains each (not possible, just for test).
	 * The array size is less than the number of memory domains.
	 */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
	CU_ASSERT(rc == 4);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] != NULL);
	CU_ASSERT(domains[2] != NULL);
	CU_ASSERT(domains[3] == NULL);
	memset(domains, 0, sizeof(domains));

	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
}

static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	nvme_qpair1 = io_path1->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	nvme_qpair2 = io_path2->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);

	/* If a qpair is disconnected, it is freed and then reconnected via
	 * resetting the corresponding nvme_ctrlr.
	 */
	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;

	poll_thread_times(1, 3);
	CU_ASSERT(nvme_qpair1->qpair != NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 3);
	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(ctrlr->is_failed == true);

	poll_thread_times(1, 2);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr->is_failed == false);
	CU_ASSERT(ctrlr->adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr->adminq.is_connected == true);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_qpair1->qpair != NULL);
	CU_ASSERT(nvme_qpair2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 2);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	poll_threads();

	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
	 * fails, the qpair is just freed.
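	 * (ctrlr->fail_reset below forces the reset attempt to fail, so both qpair
	 * pointers are expected to remain NULL once the reset gives up.)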
	 */
	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;
	ctrlr->fail_reset = true;

	poll_thread_times(1, 3);
	CU_ASSERT(nvme_qpair1->qpair != NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 3);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(ctrlr->is_failed == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 3);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr->is_failed == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);

	poll_threads();

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_create_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);

	/* cntlid is duplicated, and adding the second ctrlr should fail. */
	g_ut_attach_ctrlr_status = -EINVAL;

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);

	/* cntlid is not duplicated, and adding the third ctrlr should succeed.
	 */
	g_ut_attach_ctrlr_status = 0;

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	/* Delete the two ctrlrs at once. */
	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* Add two ctrlrs and delete them one by one. */
	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_ns *
_nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_ns *nvme_ns;

	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
		if (nvme_ns->ctrlr == nvme_ctrlr) {
			return nvme_ns;
		}
	}

	return NULL;
}

static void
test_add_multi_ns_to_bdev(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	/* Create nvme_bdevs, some of which have namespaces shared between the two ctrlrs. */

	/* Attach the 1st ctrlr, whose max number of namespaces is 5 and whose 1st, 3rd,
	 * and 4th namespaces are populated.
	 */
	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[1].is_active = false;
	ctrlr1->ns[4].is_active = false;
	ctrlr1->ns[0].uuid = &uuid1;
	ctrlr1->ns[2].uuid = &uuid3;
	ctrlr1->ns[3].uuid = &uuid4;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Attach the 2nd ctrlr, whose max number of namespaces is 5 and whose 1st, 2nd,
	 * and 4th namespaces are populated. The UUID of the 4th namespace is different,
	 * and hence adding the 4th namespace to a bdev should fail.
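	 * (A namespace joins an existing nvme_bdev only if its identifiers compare
	 * equal, as exercised by test_compare_ns above, so nvme_ctrlr_get_ns() for
	 * the 4th namespace of the 2nd ctrlr is expected to be NULL below.)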
3586 */ 3587 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3588 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3589 3590 ctrlr2->ns[2].is_active = false; 3591 ctrlr2->ns[4].is_active = false; 3592 ctrlr2->ns[0].uuid = &uuid1; 3593 ctrlr2->ns[1].uuid = &uuid2; 3594 ctrlr2->ns[3].uuid = &uuid44; 3595 3596 g_ut_attach_ctrlr_status = 0; 3597 g_ut_attach_bdev_count = 2; 3598 3599 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3600 attach_ctrlr_done, NULL, &opts, NULL, true); 3601 CU_ASSERT(rc == 0); 3602 3603 spdk_delay_us(1000); 3604 poll_threads(); 3605 3606 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3607 poll_threads(); 3608 3609 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3610 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3611 3612 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3613 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3614 3615 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3616 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3617 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3618 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3619 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3620 3621 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3622 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3623 3624 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3625 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3626 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3627 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3628 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3629 3630 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3631 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3632 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3633 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3634 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3635 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3636 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3637 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3638 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3639 3640 CU_ASSERT(bdev1->ref == 2); 3641 CU_ASSERT(bdev2->ref == 1); 3642 CU_ASSERT(bdev3->ref == 1); 3643 CU_ASSERT(bdev4->ref == 1); 3644 3645 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3646 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3647 CU_ASSERT(rc == 0); 3648 3649 poll_threads(); 3650 spdk_delay_us(1000); 3651 poll_threads(); 3652 3653 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3654 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3655 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2); 3656 3657 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3658 CU_ASSERT(rc == 0); 3659 3660 poll_threads(); 3661 spdk_delay_us(1000); 3662 poll_threads(); 3663 3664 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3665 3666 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3667 * can be deleted when the bdev subsystem shutdown. 
3668 */ 3669 g_ut_attach_bdev_count = 1; 3670 3671 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3672 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3673 3674 ctrlr1->ns[0].uuid = &uuid1; 3675 3676 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3677 attach_ctrlr_done, NULL, &opts, NULL, true); 3678 CU_ASSERT(rc == 0); 3679 3680 spdk_delay_us(1000); 3681 poll_threads(); 3682 3683 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3684 poll_threads(); 3685 3686 ut_init_trid2(&path2.trid); 3687 3688 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3689 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3690 3691 ctrlr2->ns[0].uuid = &uuid1; 3692 3693 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3694 attach_ctrlr_done, NULL, &opts, NULL, true); 3695 CU_ASSERT(rc == 0); 3696 3697 spdk_delay_us(1000); 3698 poll_threads(); 3699 3700 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3701 poll_threads(); 3702 3703 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3704 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3705 3706 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3707 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3708 3709 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3710 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3711 3712 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3713 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3714 3715 /* Check if a nvme_bdev has two nvme_ns. */ 3716 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3717 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3718 CU_ASSERT(nvme_ns1->bdev == bdev1); 3719 3720 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3721 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3722 CU_ASSERT(nvme_ns2->bdev == bdev1); 3723 3724 /* Delete nvme_bdev first when the bdev subsystem shutdown. 
*/ 3725 bdev_nvme_destruct(&bdev1->disk); 3726 3727 poll_threads(); 3728 3729 CU_ASSERT(nvme_ns1->bdev == NULL); 3730 CU_ASSERT(nvme_ns2->bdev == NULL); 3731 3732 nvme_ctrlr1->destruct = true; 3733 _nvme_ctrlr_destruct(nvme_ctrlr1); 3734 3735 poll_threads(); 3736 spdk_delay_us(1000); 3737 poll_threads(); 3738 3739 nvme_ctrlr2->destruct = true; 3740 _nvme_ctrlr_destruct(nvme_ctrlr2); 3741 3742 poll_threads(); 3743 spdk_delay_us(1000); 3744 poll_threads(); 3745 3746 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3747 } 3748 3749 static void 3750 test_add_multi_io_paths_to_nbdev_ch(void) 3751 { 3752 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3753 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3754 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3755 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3756 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3757 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3758 const int STRING_SIZE = 32; 3759 const char *attached_names[STRING_SIZE]; 3760 struct nvme_bdev *bdev; 3761 struct spdk_io_channel *ch; 3762 struct nvme_bdev_channel *nbdev_ch; 3763 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3764 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3765 int rc; 3766 3767 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3768 ut_init_trid(&path1.trid); 3769 ut_init_trid2(&path2.trid); 3770 ut_init_trid3(&path3.trid); 3771 g_ut_attach_ctrlr_status = 0; 3772 g_ut_attach_bdev_count = 1; 3773 3774 set_thread(1); 3775 3776 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3777 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3778 3779 ctrlr1->ns[0].uuid = &uuid1; 3780 3781 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3782 attach_ctrlr_done, NULL, &opts, NULL, true); 3783 CU_ASSERT(rc == 0); 3784 3785 spdk_delay_us(1000); 3786 poll_threads(); 3787 3788 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3789 poll_threads(); 3790 3791 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3792 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3793 3794 ctrlr2->ns[0].uuid = &uuid1; 3795 3796 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3797 attach_ctrlr_done, NULL, &opts, NULL, true); 3798 CU_ASSERT(rc == 0); 3799 3800 spdk_delay_us(1000); 3801 poll_threads(); 3802 3803 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3804 poll_threads(); 3805 3806 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3807 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3808 3809 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3810 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3811 3812 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3813 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3814 3815 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3816 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3817 3818 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3819 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3820 3821 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3822 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3823 3824 set_thread(0); 3825 3826 ch = spdk_get_io_channel(bdev); 3827 SPDK_CU_ASSERT_FATAL(ch != NULL); 3828 nbdev_ch = spdk_io_channel_get_ctx(ch); 3829 3830 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3831 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3832 3833 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3834 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3835 3836 set_thread(1); 3837 3838 /* Check if I/O path is dynamically added to nvme_bdev_channel. 
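	 * The channel ch was created on thread 0 before ctrlr3 is attached, so
	 * finding io_path3 in it below shows the path was added to the existing
	 * channel rather than only to newly created ones.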
*/ 3839 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3840 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3841 3842 ctrlr3->ns[0].uuid = &uuid1; 3843 3844 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3845 attach_ctrlr_done, NULL, &opts, NULL, true); 3846 CU_ASSERT(rc == 0); 3847 3848 spdk_delay_us(1000); 3849 poll_threads(); 3850 3851 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3852 poll_threads(); 3853 3854 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn); 3855 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3856 3857 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3858 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3859 3860 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3861 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3862 3863 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3864 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3865 CU_ASSERT(rc == 0); 3866 3867 poll_threads(); 3868 spdk_delay_us(1000); 3869 poll_threads(); 3870 3871 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1); 3872 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3873 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3); 3874 3875 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3876 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3877 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3878 3879 set_thread(0); 3880 3881 spdk_put_io_channel(ch); 3882 3883 poll_threads(); 3884 3885 set_thread(1); 3886 3887 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3888 CU_ASSERT(rc == 0); 3889 3890 poll_threads(); 3891 spdk_delay_us(1000); 3892 poll_threads(); 3893 3894 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3895 } 3896 3897 static void 3898 test_admin_path(void) 3899 { 3900 struct nvme_path_id path1 = {}, path2 = {}; 3901 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3902 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3903 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3904 const int STRING_SIZE = 32; 3905 const char *attached_names[STRING_SIZE]; 3906 struct nvme_bdev *bdev; 3907 struct spdk_io_channel *ch; 3908 struct spdk_bdev_io *bdev_io; 3909 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3910 int rc; 3911 3912 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3913 ut_init_trid(&path1.trid); 3914 ut_init_trid2(&path2.trid); 3915 g_ut_attach_ctrlr_status = 0; 3916 g_ut_attach_bdev_count = 1; 3917 3918 set_thread(0); 3919 3920 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3921 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3922 3923 ctrlr1->ns[0].uuid = &uuid1; 3924 3925 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3926 attach_ctrlr_done, NULL, &opts, NULL, true); 3927 CU_ASSERT(rc == 0); 3928 3929 spdk_delay_us(1000); 3930 poll_threads(); 3931 3932 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3933 poll_threads(); 3934 3935 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3936 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3937 3938 ctrlr2->ns[0].uuid = &uuid1; 3939 3940 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3941 attach_ctrlr_done, NULL, &opts, NULL, true); 3942 CU_ASSERT(rc == 0); 3943 3944 spdk_delay_us(1000); 3945 poll_threads(); 3946 3947 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3948 poll_threads(); 3949 3950 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3951 
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	/* ctrlr1 has failed but ctrlr2 has not. The admin command is
	 * submitted to ctrlr2.
	 */
	ctrlr1->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Both ctrlr1 and ctrlr2 have failed, so submission of the admin command fails. */
	ctrlr2->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_io_path *
ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
			struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_io_path *io_path;

	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
		if (io_path->qpair->ctrlr == nvme_ctrlr) {
			return io_path;
		}
	}

	return NULL;
}

static void
test_reset_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_path_id *curr_path1, *curr_path2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct nvme_bdev_io *first_bio;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

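	/* As throughout these tests, the delay/poll pair above completes the
	 * asynchronous attach, and the pair below, sized to the admin queue poll
	 * period, lets the admin poller finish controller initialization.
	 */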
spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4061 poll_threads(); 4062 4063 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4064 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4065 4066 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4067 attach_ctrlr_done, NULL, &opts, NULL, true); 4068 CU_ASSERT(rc == 0); 4069 4070 spdk_delay_us(1000); 4071 poll_threads(); 4072 4073 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4074 poll_threads(); 4075 4076 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4077 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4078 4079 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4080 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4081 4082 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4083 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4084 4085 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4086 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4087 4088 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4089 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4090 4091 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4092 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4093 4094 set_thread(0); 4095 4096 ch1 = spdk_get_io_channel(bdev); 4097 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4098 4099 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4100 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4101 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4102 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4103 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4104 4105 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4106 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4107 4108 set_thread(1); 4109 4110 ch2 = spdk_get_io_channel(bdev); 4111 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4112 4113 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4114 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4115 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4116 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4117 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4118 4119 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4120 4121 /* The first reset request from bdev_io is submitted on thread 0. 4122 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4123 * 4124 * A few extra polls are necessary after resetting ctrlr1 to check 4125 * pending reset requests for ctrlr1. 
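	 * Each poll_thread_times() call below advances the reset state machine by
	 * a fixed number of poller iterations, so the assertions pin down the
	 * exact order in which qpairs are destroyed and reconnected per thread.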
4126 */ 4127 ctrlr1->is_failed = true; 4128 curr_path1->last_failed_tsc = spdk_get_ticks(); 4129 ctrlr2->is_failed = true; 4130 curr_path2->last_failed_tsc = spdk_get_ticks(); 4131 4132 set_thread(0); 4133 4134 bdev_nvme_submit_request(ch1, first_bdev_io); 4135 CU_ASSERT(first_bio->io_path == io_path11); 4136 CU_ASSERT(nvme_ctrlr1->resetting == true); 4137 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4138 4139 poll_thread_times(0, 3); 4140 CU_ASSERT(io_path11->qpair->qpair == NULL); 4141 CU_ASSERT(io_path21->qpair->qpair != NULL); 4142 4143 poll_thread_times(1, 2); 4144 CU_ASSERT(io_path11->qpair->qpair == NULL); 4145 CU_ASSERT(io_path21->qpair->qpair == NULL); 4146 CU_ASSERT(ctrlr1->is_failed == true); 4147 4148 poll_thread_times(0, 1); 4149 CU_ASSERT(nvme_ctrlr1->resetting == true); 4150 CU_ASSERT(ctrlr1->is_failed == false); 4151 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4152 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4153 4154 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4155 poll_thread_times(0, 2); 4156 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4157 4158 poll_thread_times(0, 1); 4159 CU_ASSERT(io_path11->qpair->qpair != NULL); 4160 CU_ASSERT(io_path21->qpair->qpair == NULL); 4161 4162 poll_thread_times(1, 1); 4163 CU_ASSERT(io_path11->qpair->qpair != NULL); 4164 CU_ASSERT(io_path21->qpair->qpair != NULL); 4165 4166 poll_thread_times(0, 2); 4167 CU_ASSERT(nvme_ctrlr1->resetting == true); 4168 poll_thread_times(1, 1); 4169 CU_ASSERT(nvme_ctrlr1->resetting == true); 4170 poll_thread_times(0, 2); 4171 CU_ASSERT(nvme_ctrlr1->resetting == false); 4172 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4173 CU_ASSERT(first_bio->io_path == io_path12); 4174 CU_ASSERT(nvme_ctrlr2->resetting == true); 4175 4176 poll_thread_times(0, 3); 4177 CU_ASSERT(io_path12->qpair->qpair == NULL); 4178 CU_ASSERT(io_path22->qpair->qpair != NULL); 4179 4180 poll_thread_times(1, 2); 4181 CU_ASSERT(io_path12->qpair->qpair == NULL); 4182 CU_ASSERT(io_path22->qpair->qpair == NULL); 4183 CU_ASSERT(ctrlr2->is_failed == true); 4184 4185 poll_thread_times(0, 1); 4186 CU_ASSERT(nvme_ctrlr2->resetting == true); 4187 CU_ASSERT(ctrlr2->is_failed == false); 4188 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4189 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4190 4191 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4192 poll_thread_times(0, 2); 4193 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4194 4195 poll_thread_times(0, 1); 4196 CU_ASSERT(io_path12->qpair->qpair != NULL); 4197 CU_ASSERT(io_path22->qpair->qpair == NULL); 4198 4199 poll_thread_times(1, 2); 4200 CU_ASSERT(io_path12->qpair->qpair != NULL); 4201 CU_ASSERT(io_path22->qpair->qpair != NULL); 4202 4203 poll_thread_times(0, 2); 4204 CU_ASSERT(nvme_ctrlr2->resetting == true); 4205 poll_thread_times(1, 1); 4206 CU_ASSERT(nvme_ctrlr2->resetting == true); 4207 poll_thread_times(0, 2); 4208 CU_ASSERT(first_bio->io_path == NULL); 4209 CU_ASSERT(nvme_ctrlr2->resetting == false); 4210 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4211 4212 poll_threads(); 4213 4214 /* There is a race between two reset requests from bdev_io. 4215 * 4216 * The first reset request is submitted on thread 0, and the second reset 4217 * request is submitted on thread 1 while the first is resetting ctrlr1. 4218 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4219 * both reset requests go to ctrlr2. The first comes earlier than the second. 4220 * The second is pending on ctrlr2 again. 
After the first completes resetting
	 * ctrlr2, both complete successfully.
	 */
	ctrlr1->is_failed = true;
	curr_path1->last_failed_tsc = spdk_get_ticks();
	ctrlr2->is_failed = true;
	curr_path2->last_failed_tsc = spdk_get_ticks();
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);

	set_thread(1);

	bdev_nvme_submit_request(ch2, second_bdev_io);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(curr_path1->last_failed_tsc == 0);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(curr_path2->last_failed_tsc == 0);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_find_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);

	/* Test if an io_path whose ANA state is not accessible is excluded.
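	 * Besides INACCESSIBLE itself, the PERSISTENT_LOSS and CHANGE states are
	 * also treated as not accessible for I/O below.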
*/ 4300 4301 nvme_qpair1.qpair = &qpair1; 4302 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4303 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4304 4305 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4306 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4307 4308 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4309 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4310 4311 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4312 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4313 4314 nbdev_ch.current_io_path = NULL; 4315 4316 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4317 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4318 4319 nbdev_ch.current_io_path = NULL; 4320 4321 /* Test if io_path whose qpair is resetting is excluded. */ 4322 4323 nvme_qpair1.qpair = NULL; 4324 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4325 4326 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4327 4328 /* Test if ANA optimized state or the first found ANA non-optimized state 4329 * is prioritized. 4330 */ 4331 4332 nvme_qpair1.qpair = &qpair1; 4333 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4334 nvme_qpair2.qpair = &qpair2; 4335 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4336 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4337 4338 nbdev_ch.current_io_path = NULL; 4339 4340 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4341 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4342 4343 nbdev_ch.current_io_path = NULL; 4344 } 4345 4346 static void 4347 test_retry_io_if_ana_state_is_updating(void) 4348 { 4349 struct nvme_path_id path = {}; 4350 struct nvme_ctrlr_opts opts = {}; 4351 struct spdk_nvme_ctrlr *ctrlr; 4352 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 4353 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4354 struct nvme_ctrlr *nvme_ctrlr; 4355 const int STRING_SIZE = 32; 4356 const char *attached_names[STRING_SIZE]; 4357 struct nvme_bdev *bdev; 4358 struct nvme_ns *nvme_ns; 4359 struct spdk_bdev_io *bdev_io1; 4360 struct spdk_io_channel *ch; 4361 struct nvme_bdev_channel *nbdev_ch; 4362 struct nvme_io_path *io_path; 4363 struct nvme_qpair *nvme_qpair; 4364 int rc; 4365 4366 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4367 ut_init_trid(&path.trid); 4368 4369 set_thread(0); 4370 4371 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4372 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4373 4374 g_ut_attach_ctrlr_status = 0; 4375 g_ut_attach_bdev_count = 1; 4376 4377 opts.ctrlr_loss_timeout_sec = -1; 4378 opts.reconnect_delay_sec = 1; 4379 4380 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4381 attach_ctrlr_done, NULL, &dopts, &opts, false); 4382 CU_ASSERT(rc == 0); 4383 4384 spdk_delay_us(1000); 4385 poll_threads(); 4386 4387 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4388 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4389 4390 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 4391 CU_ASSERT(nvme_ctrlr != NULL); 4392 4393 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4394 CU_ASSERT(bdev != NULL); 4395 4396 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4397 CU_ASSERT(nvme_ns != NULL); 4398 4399 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4400 ut_bdev_io_set_buf(bdev_io1); 4401 4402 ch = spdk_get_io_channel(bdev); 4403 SPDK_CU_ASSERT_FATAL(ch != NULL); 4404 4405 nbdev_ch = spdk_io_channel_get_ctx(ch); 4406 4407 io_path = 
ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the ANA state of the namespace is inaccessible, I/O should be queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nbdev_ch->current_io_path = NULL;

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	/* The ANA state became accessible while the I/O was queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	spdk_delay_us(1000000);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_io_path_error(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4517 poll_threads(); 4518 4519 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4520 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4521 4522 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4523 CU_ASSERT(nvme_ctrlr1 != NULL); 4524 4525 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4526 CU_ASSERT(bdev != NULL); 4527 4528 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4529 CU_ASSERT(nvme_ns1 != NULL); 4530 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4531 4532 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4533 ut_bdev_io_set_buf(bdev_io); 4534 4535 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4536 4537 ch = spdk_get_io_channel(bdev); 4538 SPDK_CU_ASSERT_FATAL(ch != NULL); 4539 4540 nbdev_ch = spdk_io_channel_get_ctx(ch); 4541 4542 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4543 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4544 4545 nvme_qpair1 = io_path1->qpair; 4546 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4547 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4548 4549 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4550 4551 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4552 bdev_io->internal.in_submit_request = true; 4553 4554 bdev_nvme_submit_request(ch, bdev_io); 4555 4556 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4557 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4558 4559 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4560 SPDK_CU_ASSERT_FATAL(req != NULL); 4561 4562 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4563 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4564 req->cpl.status.dnr = 1; 4565 4566 poll_thread_times(0, 1); 4567 4568 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4569 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4570 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4571 4572 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4573 bdev_io->internal.in_submit_request = true; 4574 4575 bdev_nvme_submit_request(ch, bdev_io); 4576 4577 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4578 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4579 4580 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4581 SPDK_CU_ASSERT_FATAL(req != NULL); 4582 4583 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4584 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4585 4586 poll_thread_times(0, 1); 4587 4588 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4589 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4590 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4591 4592 poll_threads(); 4593 4594 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4595 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4596 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4597 4598 /* Add io_path2 dynamically, and create a multipath configuration. 
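	 * The I/O channel stays open while the path is added, so the retry below
	 * can fail over from io_path1 to io_path2 without reopening anything.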
*/ 4599 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4600 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4601 4602 ctrlr2->ns[0].uuid = &uuid1; 4603 4604 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4605 attach_ctrlr_done, NULL, &opts, NULL, true); 4606 CU_ASSERT(rc == 0); 4607 4608 spdk_delay_us(1000); 4609 poll_threads(); 4610 4611 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4612 poll_threads(); 4613 4614 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4615 CU_ASSERT(nvme_ctrlr2 != NULL); 4616 4617 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4618 CU_ASSERT(nvme_ns2 != NULL); 4619 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4620 4621 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4622 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4623 4624 nvme_qpair2 = io_path2->qpair; 4625 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4626 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4627 4628 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4629 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4630 * So after a retry, I/O is submitted to io_path2 and should succeed. 4631 */ 4632 bdev_io->internal.in_submit_request = true; 4633 4634 bdev_nvme_submit_request(ch, bdev_io); 4635 4636 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4637 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4638 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4639 4640 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4641 SPDK_CU_ASSERT_FATAL(req != NULL); 4642 4643 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4644 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4645 4646 poll_thread_times(0, 1); 4647 4648 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4649 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4650 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4651 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4652 4653 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4654 nvme_qpair1->qpair = NULL; 4655 4656 poll_threads(); 4657 4658 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4659 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4660 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4661 4662 free(bdev_io); 4663 4664 spdk_put_io_channel(ch); 4665 4666 poll_threads(); 4667 4668 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4669 CU_ASSERT(rc == 0); 4670 4671 poll_threads(); 4672 spdk_delay_us(1000); 4673 poll_threads(); 4674 4675 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4676 4677 g_opts.bdev_retry_count = 0; 4678 } 4679 4680 static void 4681 test_retry_io_count(void) 4682 { 4683 struct nvme_path_id path = {}; 4684 struct spdk_nvme_ctrlr *ctrlr; 4685 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4686 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4687 struct nvme_ctrlr *nvme_ctrlr; 4688 const int STRING_SIZE = 32; 4689 const char *attached_names[STRING_SIZE]; 4690 struct nvme_bdev *bdev; 4691 struct nvme_ns *nvme_ns; 4692 struct spdk_bdev_io *bdev_io; 4693 struct nvme_bdev_io *bio; 4694 struct spdk_io_channel *ch; 4695 struct nvme_bdev_channel *nbdev_ch; 4696 struct nvme_io_path *io_path; 4697 struct nvme_qpair *nvme_qpair; 4698 struct ut_nvme_req *req; 4699 int rc; 4700 4701 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4702 ut_init_trid(&path.trid); 4703 4704 set_thread(0); 4705 4706 ctrlr = 
ut_attach_ctrlr(&path.trid, 1, false, false); 4707 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4708 4709 g_ut_attach_ctrlr_status = 0; 4710 g_ut_attach_bdev_count = 1; 4711 4712 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4713 attach_ctrlr_done, NULL, &opts, NULL, false); 4714 CU_ASSERT(rc == 0); 4715 4716 spdk_delay_us(1000); 4717 poll_threads(); 4718 4719 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4720 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4721 4722 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 4723 CU_ASSERT(nvme_ctrlr != NULL); 4724 4725 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4726 CU_ASSERT(bdev != NULL); 4727 4728 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4729 CU_ASSERT(nvme_ns != NULL); 4730 4731 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4732 ut_bdev_io_set_buf(bdev_io); 4733 4734 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4735 4736 ch = spdk_get_io_channel(bdev); 4737 SPDK_CU_ASSERT_FATAL(ch != NULL); 4738 4739 nbdev_ch = spdk_io_channel_get_ctx(ch); 4740 4741 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4742 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4743 4744 nvme_qpair = io_path->qpair; 4745 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4746 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4747 4748 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4749 4750 /* If I/O is aborted by request, it should not be retried. */ 4751 g_opts.bdev_retry_count = 1; 4752 4753 bdev_io->internal.in_submit_request = true; 4754 4755 bdev_nvme_submit_request(ch, bdev_io); 4756 4757 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4758 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4759 4760 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4761 SPDK_CU_ASSERT_FATAL(req != NULL); 4762 4763 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4764 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4765 4766 poll_thread_times(0, 1); 4767 4768 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4769 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4770 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4771 4772 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4773 * the failed I/O should not be retried. 4774 */ 4775 g_opts.bdev_retry_count = 4; 4776 4777 bdev_io->internal.in_submit_request = true; 4778 4779 bdev_nvme_submit_request(ch, bdev_io); 4780 4781 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4782 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4783 4784 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4785 SPDK_CU_ASSERT_FATAL(req != NULL); 4786 4787 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4788 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4789 bio->retry_count = 4; 4790 4791 poll_thread_times(0, 1); 4792 4793 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4794 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4795 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4796 4797 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. 
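	 * (bio->retry_count is deliberately set to 4 below: the I/O is still
	 * retried, showing the count is not compared against a negative limit.)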
*/ 4798 g_opts.bdev_retry_count = -1; 4799 4800 bdev_io->internal.in_submit_request = true; 4801 4802 bdev_nvme_submit_request(ch, bdev_io); 4803 4804 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4805 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4806 4807 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4808 SPDK_CU_ASSERT_FATAL(req != NULL); 4809 4810 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4811 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4812 bio->retry_count = 4; 4813 4814 poll_thread_times(0, 1); 4815 4816 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4817 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4818 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4819 4820 poll_threads(); 4821 4822 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4823 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4824 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4825 4826 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4827 * the failed I/O should be retried. 4828 */ 4829 g_opts.bdev_retry_count = 4; 4830 4831 bdev_io->internal.in_submit_request = true; 4832 4833 bdev_nvme_submit_request(ch, bdev_io); 4834 4835 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4836 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4837 4838 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4839 SPDK_CU_ASSERT_FATAL(req != NULL); 4840 4841 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4842 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4843 bio->retry_count = 3; 4844 4845 poll_thread_times(0, 1); 4846 4847 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4848 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4849 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4850 4851 poll_threads(); 4852 4853 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4854 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4855 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4856 4857 free(bdev_io); 4858 4859 spdk_put_io_channel(ch); 4860 4861 poll_threads(); 4862 4863 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4864 CU_ASSERT(rc == 0); 4865 4866 poll_threads(); 4867 spdk_delay_us(1000); 4868 poll_threads(); 4869 4870 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4871 4872 g_opts.bdev_retry_count = 0; 4873 } 4874 4875 static void 4876 test_concurrent_read_ana_log_page(void) 4877 { 4878 struct spdk_nvme_transport_id trid = {}; 4879 struct spdk_nvme_ctrlr *ctrlr; 4880 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4881 struct nvme_ctrlr *nvme_ctrlr; 4882 const int STRING_SIZE = 32; 4883 const char *attached_names[STRING_SIZE]; 4884 int rc; 4885 4886 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4887 ut_init_trid(&trid); 4888 4889 set_thread(0); 4890 4891 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4892 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4893 4894 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4895 4896 g_ut_attach_ctrlr_status = 0; 4897 g_ut_attach_bdev_count = 1; 4898 4899 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4900 attach_ctrlr_done, NULL, &opts, NULL, false); 4901 CU_ASSERT(rc == 0); 4902 4903 spdk_delay_us(1000); 4904 poll_threads(); 4905 4906 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4907 poll_threads(); 4908 4909 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4910 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4911 4912 
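	/* Start one ANA log page read; while it is in flight, the ctrlr is
	 * expected to reject overlapping reads, even from another thread.
	 */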
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* Any following read request should be rejected. */
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	set_thread(1);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A reset request issued while reading the ANA log page should not be rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Reading the ANA log page while the ctrlr is resetting should be rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	set_thread(0);

	/* It is possible that the target sent an ANA change notification for
	 * inactive namespaces.
	 *
	 * Previously, an assert() was added because this case was thought to be
	 * unlikely. However, the assert() was hit in a real environment.
	 * Hence, remove the assert() and add a unit test case.
	 *
	 * Simulate this case by depopulating namespaces and then parsing an ANA
	 * log page created when all namespaces were active.
	 * Then, check if parsing the ANA log page completes successfully.
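	 * (The log page was built while all namespaces were active, so the parser
	 * now sees ANA entries for namespaces that no longer exist and must skip
	 * them instead of asserting.)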
	 */
	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);

	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_ana_error(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	struct ut_nvme_req *req;
	uint64_t now;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	now = spdk_get_ticks();

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If an I/O gets an ANA error, it should be queued, the corresponding
	 * namespace should be frozen, and its ANA state should be updated.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* The I/O should be retried immediately. */
	CU_ASSERT(bio->retry_ticks == now);
	CU_ASSERT(nvme_ns->ana_state_updating == true);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);

	poll_threads();

	/* The namespace is inaccessible, and hence the I/O should be queued again. */
	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* The I/O should be retried after a second if no I/O path was found but
	 * any I/O path may become available.
	 */
	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());

	/* The namespace should be unfrozen after its ANA state update completes. */
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ns->ana_state_updating == false);
	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	/* Retrying the queued I/O should succeed. */
	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_check_io_error_resiliency_params(void)
{
	/* The 1st parameter is ctrlr_loss_timeout_sec, the 2nd is
	 * reconnect_delay_sec, and the 3rd is fast_io_fail_timeout_sec.
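	 * Rules inferred from the cases below: ctrlr_loss_timeout_sec must be -1
	 * (infinite) or non-negative; reconnect_delay_sec must be nonzero if and
	 * only if a ctrlr loss timeout is in effect, and may not exceed a
	 * positive ctrlr_loss_timeout_sec; fast_io_fail_timeout_sec, when
	 * nonzero, must lie between reconnect_delay_sec and
	 * ctrlr_loss_timeout_sec (the upper bound again waived when
	 * ctrlr_loss_timeout_sec is -1).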
5136 */ 5137 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5138 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5139 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5140 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5141 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5142 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5143 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5144 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5145 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5146 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5147 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5148 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5149 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5150 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5151 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5152 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5153 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5154 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5155 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5156 } 5157 5158 static void 5159 test_retry_io_if_ctrlr_is_resetting(void) 5160 { 5161 struct nvme_path_id path = {}; 5162 struct nvme_ctrlr_opts opts = {}; 5163 struct spdk_nvme_ctrlr *ctrlr; 5164 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5165 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5166 struct nvme_ctrlr *nvme_ctrlr; 5167 const int STRING_SIZE = 32; 5168 const char *attached_names[STRING_SIZE]; 5169 struct nvme_bdev *bdev; 5170 struct nvme_ns *nvme_ns; 5171 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5172 struct spdk_io_channel *ch; 5173 struct nvme_bdev_channel *nbdev_ch; 5174 struct nvme_io_path *io_path; 5175 struct nvme_qpair *nvme_qpair; 5176 int rc; 5177 5178 g_opts.bdev_retry_count = 1; 5179 5180 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5181 ut_init_trid(&path.trid); 5182 5183 set_thread(0); 5184 5185 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5186 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5187 5188 g_ut_attach_ctrlr_status = 0; 5189 g_ut_attach_bdev_count = 1; 5190 5191 opts.ctrlr_loss_timeout_sec = -1; 5192 opts.reconnect_delay_sec = 1; 5193 5194 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5195 attach_ctrlr_done, NULL, &dopts, &opts, false); 5196 CU_ASSERT(rc == 0); 5197 5198 spdk_delay_us(1000); 5199 poll_threads(); 5200 5201 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5202 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5203 5204 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5205 CU_ASSERT(nvme_ctrlr != NULL); 5206 5207 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5208 CU_ASSERT(bdev != NULL); 5209 5210 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5211 CU_ASSERT(nvme_ns != NULL); 5212 5213 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5214 ut_bdev_io_set_buf(bdev_io1); 5215 5216 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5217 ut_bdev_io_set_buf(bdev_io2); 5218 5219 ch = spdk_get_io_channel(bdev); 
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the qpair is disconnected, it is freed and then reconnected via
	 * resetting the corresponding nvme_ctrlr. I/O should be queued if it is
	 * submitted while the nvme_ctrlr is resetting.
	 */
	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;

	poll_thread_times(0, 5);

	CU_ASSERT(nvme_qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr->is_failed == false);

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	spdk_delay_us(1);

	bdev_io2->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io2);

	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	spdk_delay_us(1);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);
	free(bdev_io2);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5325 5326 g_opts.bdev_retry_count = 0; 5327 } 5328 5329 static void 5330 test_reconnect_ctrlr(void) 5331 { 5332 struct spdk_nvme_transport_id trid = {}; 5333 struct spdk_nvme_ctrlr ctrlr = {}; 5334 struct nvme_ctrlr *nvme_ctrlr; 5335 struct spdk_io_channel *ch1, *ch2; 5336 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5337 int rc; 5338 5339 ut_init_trid(&trid); 5340 TAILQ_INIT(&ctrlr.active_io_qpairs); 5341 5342 set_thread(0); 5343 5344 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5345 CU_ASSERT(rc == 0); 5346 5347 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5348 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5349 5350 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5351 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5352 5353 ch1 = spdk_get_io_channel(nvme_ctrlr); 5354 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5355 5356 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5357 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5358 5359 set_thread(1); 5360 5361 ch2 = spdk_get_io_channel(nvme_ctrlr); 5362 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5363 5364 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5365 5366 /* Reset starts from thread 1. */ 5367 set_thread(1); 5368 5369 /* The reset should fail and a reconnect timer should be registered. */ 5370 ctrlr.fail_reset = true; 5371 ctrlr.is_failed = true; 5372 5373 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5374 CU_ASSERT(rc == 0); 5375 CU_ASSERT(nvme_ctrlr->resetting == true); 5376 CU_ASSERT(ctrlr.is_failed == true); 5377 5378 poll_threads(); 5379 5380 CU_ASSERT(nvme_ctrlr->resetting == false); 5381 CU_ASSERT(ctrlr.is_failed == false); 5382 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5383 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5384 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5385 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5386 5387 /* A new reset starts from thread 1. */ 5388 set_thread(1); 5389 5390 /* The reset should cancel the reconnect timer and should start from reconnection. 5391 * Then, the reset should fail and a reconnect timer should be registered again. 5392 */ 5393 ctrlr.fail_reset = true; 5394 ctrlr.is_failed = true; 5395 5396 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5397 CU_ASSERT(rc == 0); 5398 CU_ASSERT(nvme_ctrlr->resetting == true); 5399 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5400 CU_ASSERT(ctrlr.is_failed == true); 5401 5402 poll_threads(); 5403 5404 CU_ASSERT(nvme_ctrlr->resetting == false); 5405 CU_ASSERT(ctrlr.is_failed == false); 5406 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5407 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5408 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5409 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5410 5411 /* Then a reconnect retry should succeed. */ 5412 ctrlr.fail_reset = false; 5413 5414 spdk_delay_us(SPDK_SEC_TO_USEC); 5415 poll_thread_times(0, 1); 5416 5417 CU_ASSERT(nvme_ctrlr->resetting == true); 5418 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5419 5420 poll_threads(); 5421 5422 CU_ASSERT(nvme_ctrlr->resetting == false); 5423 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5424 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5425 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5426 5427 /* The reset should fail and a reconnect timer should be registered.
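 * ctrlr_loss_timeout_sec is 2 and reconnect_delay_sec is 1 here, so reconnect retries continue until the loss timeout expires.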
*/ 5428 ctrlr.fail_reset = true; 5429 ctrlr.is_failed = true; 5430 5431 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5432 CU_ASSERT(rc == 0); 5433 CU_ASSERT(nvme_ctrlr->resetting == true); 5434 CU_ASSERT(ctrlr.is_failed == true); 5435 5436 poll_threads(); 5437 5438 CU_ASSERT(nvme_ctrlr->resetting == false); 5439 CU_ASSERT(ctrlr.is_failed == false); 5440 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5441 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5442 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5443 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5444 5445 /* Then a reconnect retry should still fail. */ 5446 spdk_delay_us(SPDK_SEC_TO_USEC); 5447 poll_thread_times(0, 1); 5448 5449 CU_ASSERT(nvme_ctrlr->resetting == true); 5450 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5451 5452 poll_threads(); 5453 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5454 poll_threads(); 5455 5456 CU_ASSERT(nvme_ctrlr->resetting == false); 5457 CU_ASSERT(ctrlr.is_failed == false); 5458 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5459 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5460 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5461 5462 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5463 spdk_delay_us(SPDK_SEC_TO_USEC); 5464 poll_threads(); 5465 5466 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5467 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5468 CU_ASSERT(nvme_ctrlr->destruct == true); 5469 5470 spdk_put_io_channel(ch2); 5471 5472 set_thread(0); 5473 5474 spdk_put_io_channel(ch1); 5475 5476 poll_threads(); 5477 spdk_delay_us(1000); 5478 poll_threads(); 5479 5480 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5481 } 5482 5483 static struct nvme_path_id * 5484 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5485 const struct spdk_nvme_transport_id *trid) 5486 { 5487 struct nvme_path_id *p; 5488 5489 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5490 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5491 break; 5492 } 5493 } 5494 5495 return p; 5496 } 5497 5498 static void 5499 test_retry_failover_ctrlr(void) 5500 { 5501 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5502 struct spdk_nvme_ctrlr ctrlr = {}; 5503 struct nvme_ctrlr *nvme_ctrlr = NULL; 5504 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5505 struct spdk_io_channel *ch; 5506 struct nvme_ctrlr_channel *ctrlr_ch; 5507 int rc; 5508 5509 ut_init_trid(&trid1); 5510 ut_init_trid2(&trid2); 5511 ut_init_trid3(&trid3); 5512 TAILQ_INIT(&ctrlr.active_io_qpairs); 5513 5514 set_thread(0); 5515 5516 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5517 CU_ASSERT(rc == 0); 5518 5519 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5520 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5521 5522 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5523 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5524 5525 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5526 CU_ASSERT(rc == 0); 5527 5528 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5529 CU_ASSERT(rc == 0); 5530 5531 ch = spdk_get_io_channel(nvme_ctrlr); 5532 SPDK_CU_ASSERT_FATAL(ch != NULL); 5533 5534 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5535 5536 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5537 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5538 CU_ASSERT(path_id1->last_failed_tsc == 0); 5539 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5540 5541 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to 
trid2. */ 5542 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5543 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5544 5545 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5546 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5547 5548 /* It is expected that connecting to all of trid1, trid2, and trid3 fails, 5549 * and a reconnect timer is started. */ 5550 ctrlr.fail_reset = true; 5551 ctrlr.is_failed = true; 5552 5553 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5554 CU_ASSERT(rc == 0); 5555 5556 poll_threads(); 5557 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5558 poll_threads(); 5559 5560 CU_ASSERT(nvme_ctrlr->resetting == false); 5561 CU_ASSERT(ctrlr.is_failed == false); 5562 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5563 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5564 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5565 CU_ASSERT(path_id1->last_failed_tsc != 0); 5566 5567 CU_ASSERT(path_id2->last_failed_tsc != 0); 5568 CU_ASSERT(path_id3->last_failed_tsc != 0); 5569 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5570 5571 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5572 * switched to trid2 but reset is not started. 5573 */ 5574 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5575 CU_ASSERT(rc == -EALREADY); 5576 5577 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5578 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5579 5580 CU_ASSERT(nvme_ctrlr->resetting == false); 5581 5582 /* If reconnect succeeds, trid2 should be the active path_id */ 5583 ctrlr.fail_reset = false; 5584 5585 spdk_delay_us(SPDK_SEC_TO_USEC); 5586 poll_thread_times(0, 1); 5587 5588 CU_ASSERT(nvme_ctrlr->resetting == true); 5589 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5590 5591 poll_threads(); 5592 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5593 poll_threads(); 5594 5595 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5596 CU_ASSERT(path_id2->last_failed_tsc == 0); 5597 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5598 CU_ASSERT(nvme_ctrlr->resetting == false); 5599 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5600 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5601 5602 spdk_put_io_channel(ch); 5603 5604 poll_threads(); 5605 5606 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5607 CU_ASSERT(rc == 0); 5608 5609 poll_threads(); 5610 spdk_delay_us(1000); 5611 poll_threads(); 5612 5613 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5614 } 5615 5616 static void 5617 test_fail_path(void) 5618 { 5619 struct nvme_path_id path = {}; 5620 struct nvme_ctrlr_opts opts = {}; 5621 struct spdk_nvme_ctrlr *ctrlr; 5622 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5623 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5624 struct nvme_ctrlr *nvme_ctrlr; 5625 const int STRING_SIZE = 32; 5626 const char *attached_names[STRING_SIZE]; 5627 struct nvme_bdev *bdev; 5628 struct nvme_ns *nvme_ns; 5629 struct spdk_bdev_io *bdev_io; 5630 struct spdk_io_channel *ch; 5631 struct nvme_bdev_channel *nbdev_ch; 5632 struct nvme_io_path *io_path; 5633 struct nvme_ctrlr_channel *ctrlr_ch; 5634 int rc; 5635 5636 /* The test scenario is the following. 5637 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5638 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated. 5639 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5640 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec 5641 * comes first. The queued I/O is failed.
5642 * - After fast_io_fail_timeout_sec, any I/O is failed immediately. 5643 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5644 */ 5645 5646 g_opts.bdev_retry_count = 1; 5647 5648 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5649 ut_init_trid(&path.trid); 5650 5651 set_thread(0); 5652 5653 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5654 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5655 5656 g_ut_attach_ctrlr_status = 0; 5657 g_ut_attach_bdev_count = 1; 5658 5659 opts.ctrlr_loss_timeout_sec = 4; 5660 opts.reconnect_delay_sec = 1; 5661 opts.fast_io_fail_timeout_sec = 2; 5662 5663 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5664 attach_ctrlr_done, NULL, &dopts, &opts, false); 5665 CU_ASSERT(rc == 0); 5666 5667 spdk_delay_us(1000); 5668 poll_threads(); 5669 5670 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5671 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5672 5673 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5674 CU_ASSERT(nvme_ctrlr != NULL); 5675 5676 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5677 CU_ASSERT(bdev != NULL); 5678 5679 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5680 CU_ASSERT(nvme_ns != NULL); 5681 5682 ch = spdk_get_io_channel(bdev); 5683 SPDK_CU_ASSERT_FATAL(ch != NULL); 5684 5685 nbdev_ch = spdk_io_channel_get_ctx(ch); 5686 5687 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5688 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5689 5690 ctrlr_ch = io_path->qpair->ctrlr_ch; 5691 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5692 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5693 5694 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5695 ut_bdev_io_set_buf(bdev_io); 5696 5697 5698 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5699 ctrlr->fail_reset = true; 5700 ctrlr->is_failed = true; 5701 5702 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5703 CU_ASSERT(rc == 0); 5704 CU_ASSERT(nvme_ctrlr->resetting == true); 5705 CU_ASSERT(ctrlr->is_failed == true); 5706 5707 poll_threads(); 5708 5709 CU_ASSERT(nvme_ctrlr->resetting == false); 5710 CU_ASSERT(ctrlr->is_failed == false); 5711 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5712 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5713 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5714 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5715 5716 /* I/O should be queued. */ 5717 bdev_io->internal.in_submit_request = true; 5718 5719 bdev_nvme_submit_request(ch, bdev_io); 5720 5721 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5722 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5723 5724 /* After a second, the I/O should still be queued and the ctrlr should 5725 * still be recovering. 5726 */ 5727 spdk_delay_us(SPDK_SEC_TO_USEC); 5728 poll_threads(); 5729 5730 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5731 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5732 5733 CU_ASSERT(nvme_ctrlr->resetting == false); 5734 CU_ASSERT(ctrlr->is_failed == false); 5735 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5736 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5737 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5738 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5739 5740 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5741 5742 /* After two seconds, fast_io_fail_timeout_sec should expire.
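 * (fast_io_fail_timeout_sec was set to 2 when the ctrlr was created.)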
*/ 5743 spdk_delay_us(SPDK_SEC_TO_USEC); 5744 poll_threads(); 5745 5746 CU_ASSERT(nvme_ctrlr->resetting == false); 5747 CU_ASSERT(ctrlr->is_failed == false); 5748 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5749 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5750 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5751 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5752 5753 /* Then within a second, pending I/O should be failed. */ 5754 spdk_delay_us(SPDK_SEC_TO_USEC); 5755 poll_threads(); 5756 5757 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5758 poll_threads(); 5759 5760 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5761 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5762 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5763 5764 /* Another I/O submission should be failed immediately. */ 5765 bdev_io->internal.in_submit_request = true; 5766 5767 bdev_nvme_submit_request(ch, bdev_io); 5768 5769 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5770 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5771 5772 /* After four seconds, ctrlr_loss_timeout_sec should expire and ctrlr should 5773 * be deleted. 5774 */ 5775 spdk_delay_us(SPDK_SEC_TO_USEC); 5776 poll_threads(); 5777 5778 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5779 poll_threads(); 5780 5781 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5782 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5783 CU_ASSERT(nvme_ctrlr->destruct == true); 5784 5785 spdk_put_io_channel(ch); 5786 5787 poll_threads(); 5788 spdk_delay_us(1000); 5789 poll_threads(); 5790 5791 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5792 5793 free(bdev_io); 5794 5795 g_opts.bdev_retry_count = 0; 5796 } 5797 5798 static void 5799 test_nvme_ns_cmp(void) 5800 { 5801 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5802 5803 nvme_ns1.id = 0; 5804 nvme_ns2.id = UINT32_MAX; 5805 5806 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5807 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5808 } 5809 5810 static void 5811 test_ana_transition(void) 5812 { 5813 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5814 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5815 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5816 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5817 5818 /* case 1: ANA transition timedout flag is cleared. */ 5819 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5820 nvme_ns.ana_transition_timedout = true; 5821 5822 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5823 5824 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5825 5826 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5827 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5828 5829 /* case 2: ANATT timer is kept. */ 5830 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5831 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5832 &nvme_ns, 5833 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5834 5835 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5836 5837 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5838 5839 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5840 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5841 5842 /* case 3: ANATT timer is stopped.
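 * Moving from ANA change state to an accessible state should release the timer registered in case 2.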
*/ 5843 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5844 5845 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5846 5847 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5848 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5849 5850 /* ANATT timer is started. */ 5851 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5852 5853 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5854 5855 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5856 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5857 5858 /* ANATT timer is expired. */ 5859 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5860 5861 poll_threads(); 5862 5863 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5864 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5865 } 5866 5867 static void 5868 _set_preferred_path_cb(void *cb_arg, int rc) 5869 { 5870 bool *done = cb_arg; 5871 5872 *done = true; 5873 } 5874 5875 static void 5876 test_set_preferred_path(void) 5877 { 5878 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5879 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5880 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 5881 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5882 const int STRING_SIZE = 32; 5883 const char *attached_names[STRING_SIZE]; 5884 struct nvme_bdev *bdev; 5885 struct spdk_io_channel *ch; 5886 struct nvme_bdev_channel *nbdev_ch; 5887 struct nvme_io_path *io_path; 5888 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5889 const struct spdk_nvme_ctrlr_data *cdata; 5890 bool done; 5891 int rc; 5892 5893 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5894 ut_init_trid(&path1.trid); 5895 ut_init_trid2(&path2.trid); 5896 ut_init_trid3(&path3.trid); 5897 g_ut_attach_ctrlr_status = 0; 5898 g_ut_attach_bdev_count = 1; 5899 5900 set_thread(0); 5901 5902 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5903 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5904 5905 ctrlr1->ns[0].uuid = &uuid1; 5906 5907 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5908 attach_ctrlr_done, NULL, &opts, NULL, true); 5909 CU_ASSERT(rc == 0); 5910 5911 spdk_delay_us(1000); 5912 poll_threads(); 5913 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5914 poll_threads(); 5915 5916 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5917 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5918 5919 ctrlr2->ns[0].uuid = &uuid1; 5920 5921 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5922 attach_ctrlr_done, NULL, &opts, NULL, true); 5923 CU_ASSERT(rc == 0); 5924 5925 spdk_delay_us(1000); 5926 poll_threads(); 5927 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5928 poll_threads(); 5929 5930 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5931 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5932 5933 ctrlr3->ns[0].uuid = &uuid1; 5934 5935 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5936 attach_ctrlr_done, NULL, &opts, NULL, true); 5937 CU_ASSERT(rc == 0); 5938 5939 spdk_delay_us(1000); 5940 poll_threads(); 5941 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5942 poll_threads(); 5943 5944 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5945 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5946 5947 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5948 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5949 5950 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. 
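 * (Paths are stored in registration order, so without an explicit preference the first one is selected.)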
*/ 5951 5952 ch = spdk_get_io_channel(bdev); 5953 SPDK_CU_ASSERT_FATAL(ch != NULL); 5954 nbdev_ch = spdk_io_channel_get_ctx(ch); 5955 5956 io_path = bdev_nvme_find_io_path(nbdev_ch); 5957 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5958 5959 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5960 5961 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5962 * should return io_path to ctrlr2. 5963 */ 5964 5965 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5966 done = false; 5967 5968 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5969 5970 poll_threads(); 5971 CU_ASSERT(done == true); 5972 5973 io_path = bdev_nvme_find_io_path(nbdev_ch); 5974 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5975 5976 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5977 5978 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5979 * acquired, find_io_path() should return io_path to ctrlr3. 5980 */ 5981 5982 spdk_put_io_channel(ch); 5983 5984 poll_threads(); 5985 5986 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5987 done = false; 5988 5989 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5990 5991 poll_threads(); 5992 CU_ASSERT(done == true); 5993 5994 ch = spdk_get_io_channel(bdev); 5995 SPDK_CU_ASSERT_FATAL(ch != NULL); 5996 nbdev_ch = spdk_io_channel_get_ctx(ch); 5997 5998 io_path = bdev_nvme_find_io_path(nbdev_ch); 5999 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6000 6001 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 6002 6003 spdk_put_io_channel(ch); 6004 6005 poll_threads(); 6006 6007 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6008 CU_ASSERT(rc == 0); 6009 6010 poll_threads(); 6011 spdk_delay_us(1000); 6012 poll_threads(); 6013 6014 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6015 } 6016 6017 static void 6018 test_find_next_io_path(void) 6019 { 6020 struct nvme_bdev_channel nbdev_ch = { 6021 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6022 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6023 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 6024 }; 6025 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6026 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6027 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6028 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6029 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6030 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6031 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6032 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6033 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6034 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6035 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6036 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6037 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6038 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6039 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6040 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6041 6042 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6043 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6044 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 
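/* The io_path_list order is io_path1 -> io_path2 -> io_path3 throughout this test. */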
6045 6046 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6047 * is covered in test_find_io_path. 6048 */ 6049 6050 nbdev_ch.current_io_path = &io_path2; 6051 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6052 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6053 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6054 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6055 6056 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6057 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6058 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6059 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6060 6061 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6062 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6063 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6064 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6065 6066 nbdev_ch.current_io_path = &io_path3; 6067 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6068 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6069 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6070 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6071 6072 /* Test if next io_path is selected according to rr_min_io */ 6073 6074 nbdev_ch.current_io_path = NULL; 6075 nbdev_ch.rr_min_io = 2; 6076 nbdev_ch.rr_counter = 0; 6077 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6078 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6079 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6080 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6081 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6082 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6083 6084 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6085 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6086 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6087 } 6088 6089 static void 6090 test_find_io_path_min_qd(void) 6091 { 6092 struct nvme_bdev_channel nbdev_ch = { 6093 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6094 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6095 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6096 }; 6097 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6098 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6099 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6100 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6101 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6102 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6103 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6104 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6105 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6106 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6107 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6108 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6109 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6110 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6111 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6112 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6113 6114 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6115 
STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6116 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6117 6118 /* Test if the minimum io_outstanding or the ANA optimized state is 6119 * prioritized when using the least queue depth selector 6120 */ 6121 qpair1.num_outstanding_reqs = 2; 6122 qpair2.num_outstanding_reqs = 1; 6123 qpair3.num_outstanding_reqs = 0; 6124 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6125 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6126 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6127 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6128 6129 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6130 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6131 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6132 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6133 6134 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6135 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6136 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6137 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6138 6139 qpair2.num_outstanding_reqs = 4; 6140 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6141 } 6142 6143 static void 6144 test_disable_auto_failback(void) 6145 { 6146 struct nvme_path_id path1 = {}, path2 = {}; 6147 struct nvme_ctrlr_opts opts = {}; 6148 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6149 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6150 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6151 struct nvme_ctrlr *nvme_ctrlr1; 6152 const int STRING_SIZE = 32; 6153 const char *attached_names[STRING_SIZE]; 6154 struct nvme_bdev *bdev; 6155 struct spdk_io_channel *ch; 6156 struct nvme_bdev_channel *nbdev_ch; 6157 struct nvme_io_path *io_path; 6158 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6159 const struct spdk_nvme_ctrlr_data *cdata; 6160 bool done; 6161 int rc; 6162 6163 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6164 ut_init_trid(&path1.trid); 6165 ut_init_trid2(&path2.trid); 6166 g_ut_attach_ctrlr_status = 0; 6167 g_ut_attach_bdev_count = 1; 6168 6169 g_opts.disable_auto_failback = true; 6170 6171 opts.ctrlr_loss_timeout_sec = -1; 6172 opts.reconnect_delay_sec = 1; 6173 6174 set_thread(0); 6175 6176 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6177 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6178 6179 ctrlr1->ns[0].uuid = &uuid1; 6180 6181 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6182 attach_ctrlr_done, NULL, &dopts, &opts, true); 6183 CU_ASSERT(rc == 0); 6184 6185 spdk_delay_us(1000); 6186 poll_threads(); 6187 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6188 poll_threads(); 6189 6190 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6191 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6192 6193 ctrlr2->ns[0].uuid = &uuid1; 6194 6195 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6196 attach_ctrlr_done, NULL, &dopts, &opts, true); 6197 CU_ASSERT(rc == 0); 6198 6199 spdk_delay_us(1000); 6200 poll_threads(); 6201 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6202 poll_threads(); 6203 6204 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6205 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6206 6207 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6208 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6209 6210 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn); 6211 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6212 6213 /* ctrlr1 was added first.
Hence io_path to ctrlr1 should be preferred. */ 6214 6215 ch = spdk_get_io_channel(bdev); 6216 SPDK_CU_ASSERT_FATAL(ch != NULL); 6217 nbdev_ch = spdk_io_channel_get_ctx(ch); 6218 6219 io_path = bdev_nvme_find_io_path(nbdev_ch); 6220 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6221 6222 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6223 6224 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6225 ctrlr1->fail_reset = true; 6226 ctrlr1->is_failed = true; 6227 6228 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6229 6230 poll_threads(); 6231 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6232 poll_threads(); 6233 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6234 poll_threads(); 6235 6236 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6237 6238 io_path = bdev_nvme_find_io_path(nbdev_ch); 6239 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6240 6241 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6242 6243 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6244 * Hence, io_path to ctrlr2 should still be used. 6245 */ 6246 ctrlr1->fail_reset = false; 6247 6248 spdk_delay_us(SPDK_SEC_TO_USEC); 6249 poll_threads(); 6250 6251 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6252 6253 io_path = bdev_nvme_find_io_path(nbdev_ch); 6254 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6255 6256 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6257 6258 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6259 * be used again. 6260 */ 6261 6262 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6263 done = false; 6264 6265 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6266 6267 poll_threads(); 6268 CU_ASSERT(done == true); 6269 6270 io_path = bdev_nvme_find_io_path(nbdev_ch); 6271 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6272 6273 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6274 6275 spdk_put_io_channel(ch); 6276 6277 poll_threads(); 6278 6279 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6280 CU_ASSERT(rc == 0); 6281 6282 poll_threads(); 6283 spdk_delay_us(1000); 6284 poll_threads(); 6285 6286 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6287 6288 g_opts.disable_auto_failback = false; 6289 } 6290 6291 static void 6292 ut_set_multipath_policy_done(void *cb_arg, int rc) 6293 { 6294 int *done = cb_arg; 6295 6296 SPDK_CU_ASSERT_FATAL(done != NULL); 6297 *done = rc; 6298 } 6299 6300 static void 6301 test_set_multipath_policy(void) 6302 { 6303 struct nvme_path_id path1 = {}, path2 = {}; 6304 struct nvme_ctrlr_opts opts = {}; 6305 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6306 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6307 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6308 const int STRING_SIZE = 32; 6309 const char *attached_names[STRING_SIZE]; 6310 struct nvme_bdev *bdev; 6311 struct spdk_io_channel *ch; 6312 struct nvme_bdev_channel *nbdev_ch; 6313 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6314 int done; 6315 int rc; 6316 6317 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6318 ut_init_trid(&path1.trid); 6319 ut_init_trid2(&path2.trid); 6320 g_ut_attach_ctrlr_status = 0; 6321 g_ut_attach_bdev_count = 1; 6322 6323 g_opts.disable_auto_failback = true; 6324 6325 opts.ctrlr_loss_timeout_sec = -1; 6326 opts.reconnect_delay_sec = 1; 6327 6328 set_thread(0); 6329 6330 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6331 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6332 6333 ctrlr1->ns[0].uuid = &uuid1; 6334 6335 rc = bdev_nvme_create(&path1.trid, "nvme0", 
attached_names, STRING_SIZE, 6336 attach_ctrlr_done, NULL, &dopts, &opts, true); 6337 CU_ASSERT(rc == 0); 6338 6339 spdk_delay_us(1000); 6340 poll_threads(); 6341 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6342 poll_threads(); 6343 6344 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6345 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6346 6347 ctrlr2->ns[0].uuid = &uuid1; 6348 6349 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6350 attach_ctrlr_done, NULL, &dopts, &opts, true); 6351 CU_ASSERT(rc == 0); 6352 6353 spdk_delay_us(1000); 6354 poll_threads(); 6355 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6356 poll_threads(); 6357 6358 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6359 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6360 6361 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6362 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6363 6364 /* If multipath policy is updated before getting any I/O channel, 6365 * a new I/O channel should have the update. 6366 */ 6367 done = -1; 6368 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6369 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6370 ut_set_multipath_policy_done, &done); 6371 poll_threads(); 6372 CU_ASSERT(done == 0); 6373 6374 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6375 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6376 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6377 6378 ch = spdk_get_io_channel(bdev); 6379 SPDK_CU_ASSERT_FATAL(ch != NULL); 6380 nbdev_ch = spdk_io_channel_get_ctx(ch); 6381 6382 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6383 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6384 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6385 6386 /* If multipath policy is updated while an I/O channel is active, 6387 * the update should be applied to the I/O channel immediately. 6388 */ 6389 done = -1; 6390 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6391 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6392 ut_set_multipath_policy_done, &done); 6393 poll_threads(); 6394 CU_ASSERT(done == 0); 6395 6396 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6397 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6398 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6399 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6400 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6401 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6402 6403 spdk_put_io_channel(ch); 6404 6405 poll_threads(); 6406 6407 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6408 CU_ASSERT(rc == 0); 6409 6410 poll_threads(); 6411 spdk_delay_us(1000); 6412 poll_threads(); 6413 6414 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6415 } 6416 6417 static void 6418 test_uuid_generation(void) 6419 { 6420 uint32_t nsid1 = 1, nsid2 = 2; 6421 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6422 char sn3[21] = " "; 6423 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6424 struct spdk_uuid uuid1, uuid2; 6425 int rc; 6426 6427 /* Test case 1: 6428 * Serial numbers are the same, nsids are different. 6429 * Compare the two generated UUIDs - they should be different.
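 * (nvme_generate_uuid() derives the UUID deterministically from the serial number and the namespace ID.)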
*/ 6430 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6431 CU_ASSERT(rc == 0); 6432 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6433 CU_ASSERT(rc == 0); 6434 6435 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6436 6437 /* Test case 2: 6438 * Serial numbers differ only by one character, nsids are the same. 6439 * Compare the two generated UUIDs - they should be different. */ 6440 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6441 CU_ASSERT(rc == 0); 6442 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6443 CU_ASSERT(rc == 0); 6444 6445 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6446 6447 /* Test case 3: 6448 * Serial number consists only of space characters. 6449 * Validate the generated UUID. */ 6450 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6451 CU_ASSERT(rc == 0); 6452 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6453 6454 } 6455 6456 static void 6457 test_retry_io_to_same_path(void) 6458 { 6459 struct nvme_path_id path1 = {}, path2 = {}; 6460 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6461 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 6462 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6463 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6464 const int STRING_SIZE = 32; 6465 const char *attached_names[STRING_SIZE]; 6466 struct nvme_bdev *bdev; 6467 struct spdk_bdev_io *bdev_io; 6468 struct nvme_bdev_io *bio; 6469 struct spdk_io_channel *ch; 6470 struct nvme_bdev_channel *nbdev_ch; 6471 struct nvme_io_path *io_path1, *io_path2; 6472 struct ut_nvme_req *req; 6473 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6474 int done; 6475 int rc; 6476 6477 g_opts.nvme_ioq_poll_period_us = 1; 6478 6479 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6480 ut_init_trid(&path1.trid); 6481 ut_init_trid2(&path2.trid); 6482 g_ut_attach_ctrlr_status = 0; 6483 g_ut_attach_bdev_count = 1; 6484 6485 set_thread(0); 6486 6487 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6488 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6489 6490 ctrlr1->ns[0].uuid = &uuid1; 6491 6492 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6493 attach_ctrlr_done, NULL, &opts, NULL, true); 6494 CU_ASSERT(rc == 0); 6495 6496 spdk_delay_us(1000); 6497 poll_threads(); 6498 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6499 poll_threads(); 6500 6501 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6502 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6503 6504 ctrlr2->ns[0].uuid = &uuid1; 6505 6506 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6507 attach_ctrlr_done, NULL, &opts, NULL, true); 6508 CU_ASSERT(rc == 0); 6509 6510 spdk_delay_us(1000); 6511 poll_threads(); 6512 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6513 poll_threads(); 6514 6515 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6516 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6517 6518 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 6519 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6520 6521 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 6522 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6523 6524 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6525 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6526 6527 done = -1; 6528 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6529 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6530 poll_threads(); 6531 CU_ASSERT(done == 0); 6532 6533 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6534
CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6535 CU_ASSERT(bdev->rr_min_io == 1); 6536 6537 ch = spdk_get_io_channel(bdev); 6538 SPDK_CU_ASSERT_FATAL(ch != NULL); 6539 nbdev_ch = spdk_io_channel_get_ctx(ch); 6540 6541 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6542 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6543 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6544 6545 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6546 ut_bdev_io_set_buf(bdev_io); 6547 6548 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6549 6550 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6551 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6552 6553 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6554 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6555 6556 /* The 1st I/O should be submitted to io_path1. */ 6557 bdev_io->internal.in_submit_request = true; 6558 6559 bdev_nvme_submit_request(ch, bdev_io); 6560 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6561 CU_ASSERT(bio->io_path == io_path1); 6562 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6563 6564 spdk_delay_us(1); 6565 6566 poll_threads(); 6567 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6568 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6569 6570 /* The 2nd I/O should be submitted to io_path2 because the path selection 6571 * policy is round-robin. 6572 */ 6573 bdev_io->internal.in_submit_request = true; 6574 6575 bdev_nvme_submit_request(ch, bdev_io); 6576 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6577 CU_ASSERT(bio->io_path == io_path2); 6578 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6579 6580 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6581 SPDK_CU_ASSERT_FATAL(req != NULL); 6582 6583 /* Set retry count to non-zero. */ 6584 g_opts.bdev_retry_count = 2; 6585 6586 /* Inject an I/O error. */ 6587 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6588 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6589 6590 /* The 2nd I/O should be queued to nbdev_ch. */ 6591 spdk_delay_us(1); 6592 poll_thread_times(0, 1); 6593 6594 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6595 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6596 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6597 6598 /* The 2nd I/O should keep caching io_path2. */ 6599 CU_ASSERT(bio->io_path == io_path2); 6600 6601 /* The 2nd I/O should be submitted to io_path2 again. */ 6602 poll_thread_times(0, 1); 6603 6604 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6605 CU_ASSERT(bio->io_path == io_path2); 6606 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6607 6608 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6609 SPDK_CU_ASSERT_FATAL(req != NULL); 6610 6611 /* Inject an I/O error again. */ 6612 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6613 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6614 req->cpl.status.crd = 1; 6615 6616 ctrlr2->cdata.crdt[1] = 1; 6617 6618 /* The 2nd I/O should be queued to nbdev_ch. */ 6619 spdk_delay_us(1); 6620 poll_thread_times(0, 1); 6621 6622 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6623 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6624 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6625 6626 /* The 2nd I/O should keep caching io_path2. */ 6627 CU_ASSERT(bio->io_path == io_path2); 6628 6629 /* Detach ctrlr2 dynamically.
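 * The retried I/O still caches io_path2, so the deletion must invalidate that cache and let the I/O fail over to io_path1.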
*/ 6630 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6631 CU_ASSERT(rc == 0); 6632 6633 spdk_delay_us(1000); 6634 poll_threads(); 6635 spdk_delay_us(1000); 6636 poll_threads(); 6637 spdk_delay_us(1000); 6638 poll_threads(); 6639 spdk_delay_us(1000); 6640 poll_threads(); 6641 6642 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 6643 6644 poll_threads(); 6645 spdk_delay_us(100000); 6646 poll_threads(); 6647 spdk_delay_us(1); 6648 poll_threads(); 6649 6650 /* The 2nd I/O should succeed by io_path1. */ 6651 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6652 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6653 CU_ASSERT(bio->io_path == io_path1); 6654 6655 free(bdev_io); 6656 6657 spdk_put_io_channel(ch); 6658 6659 poll_threads(); 6660 spdk_delay_us(1); 6661 poll_threads(); 6662 6663 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6664 CU_ASSERT(rc == 0); 6665 6666 poll_threads(); 6667 spdk_delay_us(1000); 6668 poll_threads(); 6669 6670 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6671 6672 g_opts.nvme_ioq_poll_period_us = 0; 6673 g_opts.bdev_retry_count = 0; 6674 } 6675 6676 /* This case verifies the fix for a complex race condition in which 6677 * failover was lost if the fabric connect command timed out while the 6678 * controller was being reset. 6679 */ 6680 static void 6681 test_race_between_reset_and_disconnected(void) 6682 { 6683 struct spdk_nvme_transport_id trid = {}; 6684 struct spdk_nvme_ctrlr ctrlr = {}; 6685 struct nvme_ctrlr *nvme_ctrlr = NULL; 6686 struct nvme_path_id *curr_trid; 6687 struct spdk_io_channel *ch1, *ch2; 6688 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6689 int rc; 6690 6691 ut_init_trid(&trid); 6692 TAILQ_INIT(&ctrlr.active_io_qpairs); 6693 6694 set_thread(0); 6695 6696 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6697 CU_ASSERT(rc == 0); 6698 6699 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6700 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6701 6702 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6703 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6704 6705 ch1 = spdk_get_io_channel(nvme_ctrlr); 6706 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6707 6708 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6709 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6710 6711 set_thread(1); 6712 6713 ch2 = spdk_get_io_channel(nvme_ctrlr); 6714 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6715 6716 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6717 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6718 6719 /* Reset starts from thread 1.
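 * The polls below step through the reset one state at a time: qpairs are deleted thread by thread, then the adminq is disconnected and reconnected.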
*/ 6720 set_thread(1); 6721 6722 nvme_ctrlr->resetting = false; 6723 curr_trid->last_failed_tsc = spdk_get_ticks(); 6724 ctrlr.is_failed = true; 6725 6726 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6727 CU_ASSERT(rc == 0); 6728 CU_ASSERT(nvme_ctrlr->resetting == true); 6729 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6730 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6731 6732 poll_thread_times(0, 3); 6733 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6734 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6735 6736 poll_thread_times(0, 1); 6737 poll_thread_times(1, 1); 6738 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6739 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6740 CU_ASSERT(ctrlr.is_failed == true); 6741 6742 poll_thread_times(1, 1); 6743 poll_thread_times(0, 1); 6744 CU_ASSERT(ctrlr.is_failed == false); 6745 CU_ASSERT(ctrlr.adminq.is_connected == false); 6746 6747 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6748 poll_thread_times(0, 2); 6749 CU_ASSERT(ctrlr.adminq.is_connected == true); 6750 6751 poll_thread_times(0, 1); 6752 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6753 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6754 6755 poll_thread_times(1, 1); 6756 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6757 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6758 CU_ASSERT(nvme_ctrlr->resetting == true); 6759 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6760 6761 poll_thread_times(0, 2); 6762 CU_ASSERT(nvme_ctrlr->resetting == true); 6763 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6764 poll_thread_times(1, 1); 6765 CU_ASSERT(nvme_ctrlr->resetting == true); 6766 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6767 6768 /* There is just one poll left before _bdev_nvme_reset_complete() is executed. 6769 * 6770 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric 6771 * connect command is executed. If the fabric connect command times out, 6772 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6773 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6774 * 6775 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
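 * The call below should return -EINPROGRESS and only record a pending failover.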
6776 */ 6777 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6778 CU_ASSERT(rc == -EINPROGRESS); 6779 CU_ASSERT(nvme_ctrlr->resetting == true); 6780 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6781 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6782 6783 poll_thread_times(0, 1); 6784 6785 CU_ASSERT(nvme_ctrlr->resetting == true); 6786 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6787 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6788 6789 poll_threads(); 6790 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6791 poll_threads(); 6792 6793 CU_ASSERT(nvme_ctrlr->resetting == false); 6794 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6795 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6796 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6797 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6798 6799 spdk_put_io_channel(ch2); 6800 6801 set_thread(0); 6802 6803 spdk_put_io_channel(ch1); 6804 6805 poll_threads(); 6806 6807 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6808 CU_ASSERT(rc == 0); 6809 6810 poll_threads(); 6811 spdk_delay_us(1000); 6812 poll_threads(); 6813 6814 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6815 } 6816 static void 6817 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6818 { 6819 int *_rc = (int *)cb_arg; 6820 6821 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6822 *_rc = rc; 6823 } 6824 6825 static void 6826 test_ctrlr_op_rpc(void) 6827 { 6828 struct spdk_nvme_transport_id trid = {}; 6829 struct spdk_nvme_ctrlr ctrlr = {}; 6830 struct nvme_ctrlr *nvme_ctrlr = NULL; 6831 struct nvme_path_id *curr_trid; 6832 struct spdk_io_channel *ch1, *ch2; 6833 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6834 int ctrlr_op_rc; 6835 int rc; 6836 6837 ut_init_trid(&trid); 6838 TAILQ_INIT(&ctrlr.active_io_qpairs); 6839 6840 set_thread(0); 6841 6842 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6843 CU_ASSERT(rc == 0); 6844 6845 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6846 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6847 6848 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6849 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6850 6851 ch1 = spdk_get_io_channel(nvme_ctrlr); 6852 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6853 6854 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6855 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6856 6857 set_thread(1); 6858 6859 ch2 = spdk_get_io_channel(nvme_ctrlr); 6860 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6861 6862 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6863 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6864 6865 /* Reset starts from thread 1. */ 6866 set_thread(1); 6867 6868 /* Case 1: ctrlr is already being destructed. */ 6869 nvme_ctrlr->destruct = true; 6870 ctrlr_op_rc = 0; 6871 6872 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6873 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6874 6875 poll_threads(); 6876 6877 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6878 6879 /* Case 2: reset is in progress. */ 6880 nvme_ctrlr->destruct = false; 6881 nvme_ctrlr->resetting = true; 6882 ctrlr_op_rc = 0; 6883 6884 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6885 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6886 6887 poll_threads(); 6888 6889 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6890 6891 /* Case 3: reset completes successfully. 
*/ 6892 nvme_ctrlr->resetting = false; 6893 curr_trid->last_failed_tsc = spdk_get_ticks(); 6894 ctrlr.is_failed = true; 6895 ctrlr_op_rc = -1; 6896 6897 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6898 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6899 6900 CU_ASSERT(nvme_ctrlr->resetting == true); 6901 CU_ASSERT(ctrlr_op_rc == -1); 6902 6903 poll_threads(); 6904 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6905 poll_threads(); 6906 6907 CU_ASSERT(nvme_ctrlr->resetting == false); 6908 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6909 CU_ASSERT(ctrlr.is_failed == false); 6910 CU_ASSERT(ctrlr_op_rc == 0); 6911 6912 /* Case 4: invalid operation. */ 6913 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6914 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6915 6916 poll_threads(); 6917 6918 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6919 6920 spdk_put_io_channel(ch2); 6921 6922 set_thread(0); 6923 6924 spdk_put_io_channel(ch1); 6925 6926 poll_threads(); 6927 6928 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6929 CU_ASSERT(rc == 0); 6930 6931 poll_threads(); 6932 spdk_delay_us(1000); 6933 poll_threads(); 6934 6935 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6936 } 6937 6938 static void 6939 test_bdev_ctrlr_op_rpc(void) 6940 { 6941 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6942 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6943 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6944 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6945 struct nvme_path_id *curr_trid1, *curr_trid2; 6946 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6947 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6948 int ctrlr_op_rc; 6949 int rc; 6950 6951 ut_init_trid(&trid1); 6952 ut_init_trid2(&trid2); 6953 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6954 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6955 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6956 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6957 ctrlr1.cdata.cntlid = 1; 6958 ctrlr2.cdata.cntlid = 2; 6959 ctrlr1.adminq.is_connected = true; 6960 ctrlr2.adminq.is_connected = true; 6961 6962 set_thread(0); 6963 6964 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6965 CU_ASSERT(rc == 0); 6966 6967 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6968 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6969 6970 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN); 6971 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6972 6973 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6974 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6975 6976 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6977 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6978 6979 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6980 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6981 6982 set_thread(1); 6983 6984 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6985 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6986 6987 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6988 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6989 6990 set_thread(0); 6991 6992 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6993 CU_ASSERT(rc == 0); 6994 6995 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN); 6996 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6997 6998 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 6999 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 7000 7001 ch21 = spdk_get_io_channel(nvme_ctrlr2); 7002 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 7003 7004 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 7005 CU_ASSERT(ctrlr_ch21->qpair != NULL); 7006 7007 set_thread(1); 7008 7009 ch22 = spdk_get_io_channel(nvme_ctrlr2); 7010 SPDK_CU_ASSERT_FATAL(ch22 != 
NULL); 7011 7012 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 7013 CU_ASSERT(ctrlr_ch22->qpair != NULL); 7014 7015 /* Reset starts from thread 1. */ 7016 set_thread(1); 7017 7018 nvme_ctrlr1->resetting = false; 7019 nvme_ctrlr2->resetting = false; 7020 curr_trid1->last_failed_tsc = spdk_get_ticks(); 7021 curr_trid2->last_failed_tsc = spdk_get_ticks(); 7022 ctrlr_op_rc = -1; 7023 7024 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 7025 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 7026 7027 CU_ASSERT(nvme_ctrlr1->resetting == true); 7028 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7029 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7030 CU_ASSERT(nvme_ctrlr2->resetting == false); 7031 7032 poll_thread_times(0, 3); 7033 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7034 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7035 7036 poll_thread_times(0, 1); 7037 poll_thread_times(1, 1); 7038 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7039 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7040 7041 poll_thread_times(1, 1); 7042 poll_thread_times(0, 1); 7043 CU_ASSERT(ctrlr1.adminq.is_connected == false); 7044 7045 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7046 poll_thread_times(0, 2); 7047 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7048 7049 poll_thread_times(0, 1); 7050 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7051 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7052 7053 poll_thread_times(1, 1); 7054 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7055 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7056 CU_ASSERT(nvme_ctrlr1->resetting == true); 7057 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7058 7059 poll_thread_times(0, 2); 7060 poll_thread_times(1, 1); 7061 poll_thread_times(0, 1); 7062 poll_thread_times(1, 1); 7063 poll_thread_times(0, 1); 7064 poll_thread_times(1, 1); 7065 poll_thread_times(0, 1); 7066 7067 CU_ASSERT(nvme_ctrlr1->resetting == false); 7068 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7069 CU_ASSERT(nvme_ctrlr2->resetting == true); 7070 7071 poll_threads(); 7072 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7073 poll_threads(); 7074 7075 CU_ASSERT(nvme_ctrlr2->resetting == false); 7076 CU_ASSERT(ctrlr_op_rc == 0); 7077 7078 set_thread(1); 7079 7080 spdk_put_io_channel(ch12); 7081 spdk_put_io_channel(ch22); 7082 7083 set_thread(0); 7084 7085 spdk_put_io_channel(ch11); 7086 spdk_put_io_channel(ch21); 7087 7088 poll_threads(); 7089 7090 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7091 CU_ASSERT(rc == 0); 7092 7093 poll_threads(); 7094 spdk_delay_us(1000); 7095 poll_threads(); 7096 7097 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7098 } 7099 7100 static void 7101 test_disable_enable_ctrlr(void) 7102 { 7103 struct spdk_nvme_transport_id trid = {}; 7104 struct spdk_nvme_ctrlr ctrlr = {}; 7105 struct nvme_ctrlr *nvme_ctrlr = NULL; 7106 struct nvme_path_id *curr_trid; 7107 struct spdk_io_channel *ch1, *ch2; 7108 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7109 int rc; 7110 7111 ut_init_trid(&trid); 7112 TAILQ_INIT(&ctrlr.active_io_qpairs); 7113 ctrlr.adminq.is_connected = true; 7114 7115 set_thread(0); 7116 7117 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7118 CU_ASSERT(rc == 0); 7119 7120 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7121 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7122 7123 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7124 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7125 7126 ch1 = spdk_get_io_channel(nvme_ctrlr); 7127 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7128 7129 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7130 CU_ASSERT(ctrlr_ch1->qpair != 

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Disable starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already disabled. */
	nvme_ctrlr->disabled = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	/* Case 2: ctrlr is already being destructed. */
	nvme_ctrlr->disabled = false;
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 3: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 4: disable completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.adminq.is_connected == false);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	/* Case 5: enable completes successfully. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 6: ctrlr is already enabled. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	set_thread(0);

	/* Case 7: disable cancels delayed reconnect. */
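	/* With reconnect_delay_sec set, a reset that fails (forced here via
	 * ctrlr.fail_reset) is expected to arm reconnect_delay_timer rather
	 * than retry immediately. Disabling the ctrlr should cancel that
	 * pending timer and clear reconnect_is_delayed, which the assertions
	 * below verify.
	 */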
	nvme_ctrlr->opts.reconnect_delay_sec = 10;
	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	set_thread(1);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_delete_done(void *ctx, int rc)
{
	int *delete_done_rc = ctx;
	*delete_done_rc = rc;
}

static void
test_delete_ctrlr_done(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int delete_done_rc = 0xDEADBEEF;
	int rc;

	ut_init_trid(&trid);

	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
	CU_ASSERT(rc == 0);

	for (int i = 0; i < 20; i++) {
		poll_threads();
		if (delete_done_rc == 0) {
			break;
		}
		spdk_delay_us(1000);
	}

	CU_ASSERT(delete_done_rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_ns_remove_during_reset(void)
{
	struct nvme_path_id path = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
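
	/* Capture the nvme_ctrlr, the nvme_bdev for nsid 1, and the first
	 * active namespace up front, so their identities can be compared
	 * against fresh lookups after the reset below.
	 */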

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
	 * but nvme_ns->ns should be NULL.
	 */

	CU_ASSERT(ctrlr->ns[0].is_active == true);
	ctrlr->ns[0].is_active = false;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->adminq.is_connected == true);

	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
	CU_ASSERT(nvme_ns->bdev == bdev);
	CU_ASSERT(nvme_ns->ns == NULL);

	/* Then, async event should fill nvme_ns->ns again. */

	ctrlr->ns[0].is_active = true;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
	CU_ASSERT(nvme_ns->bdev == bdev);
	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_io_path_is_current(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };

	/* io_path1 is deleting */
	io_path1.nbdev_ch = NULL;

	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);

	io_path1.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
	io_path2.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
	io_path3.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
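
	/* All three io_paths are now registered on the channel and start out
	 * connected and ANA optimized; each case below varies exactly one of
	 * mp_policy, failure_reason, or ana_state and checks the result.
	 */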

	/* active/active: io_path is current if it is available and ANA optimized. */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/active: io_path is not current if it is disconnected even if it is
	 * ANA optimized.
	 */
	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	/* active/passive: io_path is current if it is available and cached.
	 * (only ANA optimized path is cached for active/passive.)
	 */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	nbdev_ch.current_io_path = &io_path2;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/passive: io_path is not current if it is disconnected even if it is cached. */
	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;

	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	/* active/active: non-optimized path is current only if there is no optimized path. */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/passive: io_path is current if it is the first one when there is no
	 * optimized path.
	 */
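	/* With current_io_path cleared and no optimized path left, the test
	 * expects active/passive to fall back to the head of io_path_list,
	 * i.e. io_path1, and to report only that path as current.
	 */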
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	nbdev_ch.current_io_path = NULL;

	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
	CU_ADD_TEST(suite, test_admin_path);
	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
	CU_ADD_TEST(suite, test_find_io_path);
	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
	CU_ADD_TEST(suite, test_retry_io_count);
	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
	CU_ADD_TEST(suite, test_reconnect_ctrlr);
	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
	CU_ADD_TEST(suite, test_fail_path);
	CU_ADD_TEST(suite, test_nvme_ns_cmp);
	CU_ADD_TEST(suite, test_ana_transition);
	CU_ADD_TEST(suite, test_set_preferred_path);
	CU_ADD_TEST(suite, test_find_next_io_path);
	CU_ADD_TEST(suite, test_find_io_path_min_qd);
	CU_ADD_TEST(suite, test_disable_auto_failback);
	CU_ADD_TEST(suite, test_set_multipath_policy);
	CU_ADD_TEST(suite, test_uuid_generation);
	CU_ADD_TEST(suite, test_retry_io_to_same_path);
	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
	CU_ADD_TEST(suite, test_delete_ctrlr_done);
	CU_ADD_TEST(suite, test_ns_remove_during_reset);
	CU_ADD_TEST(suite, test_io_path_is_current);

	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	num_failures = spdk_ut_run_tests(argc, argv, NULL);

	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	CU_cleanup_registry();

	return num_failures;
}