1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 12 #include "common/lib/ut_multithread.c" 13 14 #include "bdev/nvme/bdev_nvme.c" 15 16 #include "unit/lib/json_mock.c" 17 18 #include "bdev/nvme/bdev_mdns_client.c" 19 20 static void *g_accel_p = (void *)0xdeadbeaf; 21 22 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 23 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 24 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 25 spdk_nvme_remove_cb remove_cb), NULL); 26 27 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 28 enum spdk_nvme_transport_type trtype)); 29 30 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 31 NULL); 32 33 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 34 35 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 36 struct spdk_nvme_transport_id *trid), 0); 37 38 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 39 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 40 41 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 42 DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0); 43 44 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 45 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 51 52 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 53 int error_code, const char *msg)); 54 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 55 (struct spdk_jsonrpc_request *request), NULL); 56 DEFINE_STUB_V(spdk_jsonrpc_end_result, 57 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 58 59 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 60 size_t opts_size)); 61 62 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 63 size_t opts_size), 0); 64 DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL); 65 DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL); 66 67 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 68 69 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 70 enum spdk_bdev_reset_stat_mode mode)); 71 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 72 struct spdk_bdev_io_stat *add)); 73 74 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr)); 75 DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL); 76 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k)); 77 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL); 78 DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0); 79 80 int 81 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 82 struct spdk_memory_domain **domains, int array_size) 83 { 84 int i, min_array_size; 85 86 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 87 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 88 for (i = 0; i < min_array_size; i++) { 89 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 90 } 91 } 92 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 93 94 return 0; 95 } 96 97 struct spdk_io_channel * 98 spdk_accel_get_io_channel(void) 99 { 100 return spdk_get_io_channel(g_accel_p); 101 } 102 103 void 104 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 105 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 106 { 107 /* Avoid warning that opts is used uninitialised */ 108 memset(opts, 0, opts_size); 109 } 110 111 #define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003" 112 113 static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN}; 114 115 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 116 (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts); 117 118 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 119 (const struct spdk_nvme_ctrlr *ctrlr), 0); 120 121 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 122 (struct spdk_nvme_ctrlr *ctrlr), NULL); 123 124 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 125 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 126 127 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 128 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 129 130 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 131 132 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 133 134 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 135 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 136 137 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 138 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 139 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 140 141 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 142 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 143 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 144 145 DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, ( 146 struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 147 struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf, 148 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 149 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 150 spdk_nvme_req_next_sge_cb next_sge_fn), 0); 151 152 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 153 size_t *size), 0); 154 155 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 156 157 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 158 159 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 160 161 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 162 163 DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns), 164 SPDK_NVME_16B_GUARD_PI); 165 166 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 167 168 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 169 170 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 171 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 172 173 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 174 175 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 176 char *name, size_t *size), 0); 177 178 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 179 (struct spdk_nvme_ns *ns), 0); 180 181 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 182 (const struct spdk_nvme_ctrlr *ctrlr), 0); 183 184 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 185 (struct spdk_nvme_ns *ns), 0); 186 187 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 188 (struct spdk_nvme_ns *ns), 0); 189 190 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 191 (struct spdk_nvme_ns *ns), 0); 192 193 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 194 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 195 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 196 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 197 198 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 199 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 200 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 201 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 202 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 203 204 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 205 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 206 void *payload, uint32_t payload_size, uint64_t slba, 207 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 208 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 209 210 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 211 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 212 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 213 214 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 215 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 216 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 217 218 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 219 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 220 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 221 222 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 223 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 224 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 225 226 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 227 228 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 229 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 230 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 231 232 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 233 (const struct spdk_nvme_status *status), NULL); 234 235 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 236 (const struct spdk_nvme_status *status), NULL); 237 238 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 239 240 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 241 242 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 243 244 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 245 246 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 247 248 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 249 struct iovec *iov, 250 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 251 DEFINE_STUB(spdk_accel_append_crc32c, int, 252 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst, 253 struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx, 254 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 255 DEFINE_STUB_V(spdk_accel_sequence_finish, 256 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 257 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 258 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 259 260 struct ut_nvme_req { 261 uint16_t opc; 262 spdk_nvme_cmd_cb cb_fn; 263 void *cb_arg; 264 struct spdk_nvme_cpl cpl; 265 TAILQ_ENTRY(ut_nvme_req) tailq; 266 }; 267 268 struct spdk_nvme_ns { 269 struct spdk_nvme_ctrlr *ctrlr; 270 uint32_t id; 271 bool is_active; 272 struct spdk_uuid *uuid; 273 enum spdk_nvme_ana_state ana_state; 274 enum spdk_nvme_csi csi; 275 }; 276 277 struct spdk_nvme_qpair { 278 struct spdk_nvme_ctrlr *ctrlr; 279 uint8_t failure_reason; 280 bool is_connected; 281 bool in_completion_context; 282 bool delete_after_completion_context; 283 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 284 uint32_t num_outstanding_reqs; 285 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 286 struct spdk_nvme_poll_group *poll_group; 287 void *poll_group_tailq_head; 288 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 289 }; 290 291 struct spdk_nvme_ctrlr { 292 uint32_t num_ns; 293 struct spdk_nvme_ns *ns; 294 struct spdk_nvme_ns_data *nsdata; 295 struct spdk_nvme_qpair adminq; 296 struct spdk_nvme_ctrlr_data cdata; 297 bool attached; 298 bool is_failed; 299 bool fail_reset; 300 bool is_removed; 301 struct spdk_nvme_transport_id trid; 302 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 303 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 304 struct spdk_nvme_ctrlr_opts opts; 305 }; 306 307 struct spdk_nvme_poll_group { 308 void *ctx; 309 struct spdk_nvme_accel_fn_table accel_fn_table; 310 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 311 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 312 }; 313 314 struct spdk_nvme_probe_ctx { 315 struct spdk_nvme_transport_id trid; 316 void *cb_ctx; 317 spdk_nvme_attach_cb attach_cb; 318 struct spdk_nvme_ctrlr *init_ctrlr; 319 }; 320 321 uint32_t 322 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 323 { 324 uint32_t nsid; 325 326 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 327 if (ctrlr->ns[nsid - 1].is_active) { 328 return nsid; 329 } 330 } 331 332 return 0; 333 } 334 335 uint32_t 336 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 337 { 338 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 339 if (ctrlr->ns[nsid - 1].is_active) { 340 return nsid; 341 } 342 } 343 344 return 0; 345 } 346 347 uint32_t 348 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 349 { 350 return qpair->num_outstanding_reqs; 351 } 352 353 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 354 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 355 g_ut_attached_ctrlrs); 356 static int g_ut_attach_ctrlr_status; 357 static size_t g_ut_attach_bdev_count; 358 static int g_ut_register_bdev_status; 359 static struct spdk_bdev *g_ut_registered_bdev; 360 static uint16_t g_ut_cntlid; 361 static struct nvme_path_id g_any_path = {}; 362 363 static void 364 ut_init_trid(struct spdk_nvme_transport_id *trid) 365 { 366 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 367 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 368 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 369 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 370 } 371 372 static void 373 ut_init_trid2(struct spdk_nvme_transport_id *trid) 374 { 375 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 376 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 377 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 378 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 379 } 380 381 static void 382 ut_init_trid3(struct spdk_nvme_transport_id *trid) 383 { 384 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 385 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 386 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 387 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 388 } 389 390 static int 391 cmp_int(int a, int b) 392 { 393 return a - b; 394 } 395 396 int 397 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 398 const struct spdk_nvme_transport_id *trid2) 399 { 400 int cmp; 401 402 /* We assume trtype is TCP for now. */ 403 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 404 405 cmp = cmp_int(trid1->trtype, trid2->trtype); 406 if (cmp) { 407 return cmp; 408 } 409 410 cmp = strcasecmp(trid1->traddr, trid2->traddr); 411 if (cmp) { 412 return cmp; 413 } 414 415 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 416 if (cmp) { 417 return cmp; 418 } 419 420 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 421 if (cmp) { 422 return cmp; 423 } 424 425 cmp = strcmp(trid1->subnqn, trid2->subnqn); 426 if (cmp) { 427 return cmp; 428 } 429 430 return 0; 431 } 432 433 static struct spdk_nvme_ctrlr * 434 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 435 bool ana_reporting, bool multipath) 436 { 437 struct spdk_nvme_ctrlr *ctrlr; 438 uint32_t i; 439 440 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 441 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 442 /* There is a ctrlr whose trid matches. */ 443 return NULL; 444 } 445 } 446 447 ctrlr = calloc(1, sizeof(*ctrlr)); 448 if (ctrlr == NULL) { 449 return NULL; 450 } 451 452 ctrlr->attached = true; 453 ctrlr->adminq.ctrlr = ctrlr; 454 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 455 ctrlr->adminq.is_connected = true; 456 457 if (num_ns != 0) { 458 ctrlr->num_ns = num_ns; 459 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 460 if (ctrlr->ns == NULL) { 461 free(ctrlr); 462 return NULL; 463 } 464 465 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 466 if (ctrlr->nsdata == NULL) { 467 free(ctrlr->ns); 468 free(ctrlr); 469 return NULL; 470 } 471 472 for (i = 0; i < num_ns; i++) { 473 ctrlr->ns[i].id = i + 1; 474 ctrlr->ns[i].ctrlr = ctrlr; 475 ctrlr->ns[i].is_active = true; 476 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 477 ctrlr->nsdata[i].nsze = 1024; 478 ctrlr->nsdata[i].nmic.can_share = multipath; 479 } 480 481 ctrlr->cdata.nn = num_ns; 482 ctrlr->cdata.mnan = num_ns; 483 ctrlr->cdata.nanagrpid = num_ns; 484 } 485 486 ctrlr->cdata.cntlid = ++g_ut_cntlid; 487 ctrlr->cdata.cmic.multi_ctrlr = multipath; 488 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 489 ctrlr->trid = *trid; 490 TAILQ_INIT(&ctrlr->active_io_qpairs); 491 492 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 493 494 return ctrlr; 495 } 496 497 static void 498 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 499 { 500 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 501 502 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 503 free(ctrlr->nsdata); 504 free(ctrlr->ns); 505 free(ctrlr); 506 } 507 508 static int 509 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 510 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 511 { 512 struct ut_nvme_req *req; 513 514 req = calloc(1, sizeof(*req)); 515 if (req == NULL) { 516 return -ENOMEM; 517 } 518 519 req->opc = opc; 520 req->cb_fn = cb_fn; 521 req->cb_arg = cb_arg; 522 523 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 524 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 525 526 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 527 qpair->num_outstanding_reqs++; 528 529 return 0; 530 } 531 532 static struct ut_nvme_req * 533 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 534 { 535 struct ut_nvme_req *req; 536 537 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 538 if (req->cb_arg == cb_arg) { 539 break; 540 } 541 } 542 543 return req; 544 } 545 546 static struct spdk_bdev_io * 547 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 548 struct spdk_io_channel *ch) 549 { 550 struct spdk_bdev_io *bdev_io; 551 552 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 553 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 554 bdev_io->type = type; 555 bdev_io->bdev = &nbdev->disk; 556 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 557 558 return bdev_io; 559 } 560 561 static void 562 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 563 { 564 bdev_io->u.bdev.iovs = &bdev_io->iov; 565 bdev_io->u.bdev.iovcnt = 1; 566 567 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 568 bdev_io->iov.iov_len = 4096; 569 } 570 571 static void 572 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 573 { 574 if (ctrlr->is_failed) { 575 free(ctrlr); 576 return; 577 } 578 579 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 580 if (probe_ctx->cb_ctx) { 581 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 582 } 583 584 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 585 586 if (probe_ctx->attach_cb) { 587 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 588 } 589 } 590 591 int 592 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 593 { 594 struct spdk_nvme_ctrlr *ctrlr, *tmp; 595 596 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 597 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 598 continue; 599 } 600 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 601 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 602 } 603 604 free(probe_ctx); 605 606 return 0; 607 } 608 609 struct spdk_nvme_probe_ctx * 610 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 611 const struct spdk_nvme_ctrlr_opts *opts, 612 spdk_nvme_attach_cb attach_cb) 613 { 614 struct spdk_nvme_probe_ctx *probe_ctx; 615 616 if (trid == NULL) { 617 return NULL; 618 } 619 620 probe_ctx = calloc(1, sizeof(*probe_ctx)); 621 if (probe_ctx == NULL) { 622 return NULL; 623 } 624 625 probe_ctx->trid = *trid; 626 probe_ctx->cb_ctx = (void *)opts; 627 probe_ctx->attach_cb = attach_cb; 628 629 return probe_ctx; 630 } 631 632 int 633 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 634 { 635 if (ctrlr->attached) { 636 ut_detach_ctrlr(ctrlr); 637 } 638 639 return 0; 640 } 641 642 int 643 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 644 { 645 SPDK_CU_ASSERT_FATAL(ctx != NULL); 646 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 647 648 return 0; 649 } 650 651 int 652 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 653 { 654 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 655 } 656 657 void 658 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 659 { 660 memset(opts, 0, opts_size); 661 662 snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN); 663 } 664 665 const struct spdk_nvme_ctrlr_data * 666 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 667 { 668 return &ctrlr->cdata; 669 } 670 671 uint32_t 672 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 673 { 674 return ctrlr->num_ns; 675 } 676 677 struct spdk_nvme_ns * 678 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 679 { 680 if (nsid < 1 || nsid > ctrlr->num_ns) { 681 return NULL; 682 } 683 684 return &ctrlr->ns[nsid - 1]; 685 } 686 687 bool 688 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 689 { 690 if (nsid < 1 || nsid > ctrlr->num_ns) { 691 return false; 692 } 693 694 return ctrlr->ns[nsid - 1].is_active; 695 } 696 697 union spdk_nvme_csts_register 698 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 699 { 700 union spdk_nvme_csts_register csts; 701 702 csts.raw = 0; 703 704 return csts; 705 } 706 707 union spdk_nvme_vs_register 708 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 709 { 710 union spdk_nvme_vs_register vs; 711 712 vs.raw = 0; 713 714 return vs; 715 } 716 717 struct spdk_nvme_qpair * 718 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 719 const struct spdk_nvme_io_qpair_opts *user_opts, 720 size_t opts_size) 721 { 722 struct spdk_nvme_qpair *qpair; 723 724 qpair = calloc(1, sizeof(*qpair)); 725 if (qpair == NULL) { 726 return NULL; 727 } 728 729 qpair->ctrlr = ctrlr; 730 TAILQ_INIT(&qpair->outstanding_reqs); 731 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 732 733 return qpair; 734 } 735 736 static void 737 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 738 { 739 struct spdk_nvme_poll_group *group = qpair->poll_group; 740 741 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 742 743 qpair->poll_group_tailq_head = &group->connected_qpairs; 744 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 745 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 746 } 747 748 static void 749 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 750 { 751 struct spdk_nvme_poll_group *group = qpair->poll_group; 752 753 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 754 755 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 756 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 757 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 758 } 759 760 int 761 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 762 struct spdk_nvme_qpair *qpair) 763 { 764 if (qpair->is_connected) { 765 return -EISCONN; 766 } 767 768 qpair->is_connected = true; 769 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 770 771 if (qpair->poll_group) { 772 nvme_poll_group_connect_qpair(qpair); 773 } 774 775 return 0; 776 } 777 778 void 779 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 780 { 781 if (!qpair->is_connected) { 782 return; 783 } 784 785 qpair->is_connected = false; 786 787 if (qpair->poll_group != NULL) { 788 nvme_poll_group_disconnect_qpair(qpair); 789 } 790 } 791 792 int 793 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 794 { 795 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 796 797 if (qpair->in_completion_context) { 798 qpair->delete_after_completion_context = true; 799 return 0; 800 } 801 802 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 803 804 if (qpair->poll_group != NULL) { 805 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 806 } 807 808 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 809 810 CU_ASSERT(qpair->num_outstanding_reqs == 0); 811 812 free(qpair); 813 814 return 0; 815 } 816 817 int 818 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 819 { 820 if (ctrlr->fail_reset) { 821 ctrlr->is_failed = true; 822 return -EIO; 823 } 824 825 ctrlr->adminq.is_connected = true; 826 return 0; 827 } 828 829 void 830 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 831 { 832 } 833 834 int 835 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 836 { 837 if (ctrlr->is_removed) { 838 return -ENXIO; 839 } 840 841 ctrlr->adminq.is_connected = false; 842 ctrlr->is_failed = false; 843 844 return 0; 845 } 846 847 void 848 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 849 { 850 ctrlr->is_failed = true; 851 } 852 853 bool 854 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 855 { 856 return ctrlr->is_failed; 857 } 858 859 spdk_nvme_qp_failure_reason 860 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 861 { 862 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 863 } 864 865 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 866 sizeof(uint32_t)) 867 static void 868 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 869 { 870 struct spdk_nvme_ana_page ana_hdr; 871 char _ana_desc[UT_ANA_DESC_SIZE]; 872 struct spdk_nvme_ana_group_descriptor *ana_desc; 873 struct spdk_nvme_ns *ns; 874 uint32_t i; 875 876 memset(&ana_hdr, 0, sizeof(ana_hdr)); 877 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 878 879 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 880 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 881 882 buf += sizeof(ana_hdr); 883 length -= sizeof(ana_hdr); 884 885 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 886 887 for (i = 0; i < ctrlr->num_ns; i++) { 888 ns = &ctrlr->ns[i]; 889 890 if (!ns->is_active) { 891 continue; 892 } 893 894 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 895 896 ana_desc->ana_group_id = ns->id; 897 ana_desc->num_of_nsid = 1; 898 ana_desc->ana_state = ns->ana_state; 899 ana_desc->nsid[0] = ns->id; 900 901 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 902 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 903 904 buf += UT_ANA_DESC_SIZE; 905 length -= UT_ANA_DESC_SIZE; 906 } 907 } 908 909 int 910 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 911 uint8_t log_page, uint32_t nsid, 912 void *payload, uint32_t payload_size, 913 uint64_t offset, 914 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 915 { 916 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 917 SPDK_CU_ASSERT_FATAL(offset == 0); 918 ut_create_ana_log_page(ctrlr, payload, payload_size); 919 } 920 921 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 922 cb_fn, cb_arg); 923 } 924 925 int 926 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 927 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 928 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 929 { 930 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 931 } 932 933 int 934 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 935 void *cmd_cb_arg, 936 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 937 { 938 struct ut_nvme_req *req = NULL, *abort_req; 939 940 if (qpair == NULL) { 941 qpair = &ctrlr->adminq; 942 } 943 944 abort_req = calloc(1, sizeof(*abort_req)); 945 if (abort_req == NULL) { 946 return -ENOMEM; 947 } 948 949 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 950 if (req->cb_arg == cmd_cb_arg) { 951 break; 952 } 953 } 954 955 if (req == NULL) { 956 free(abort_req); 957 return -ENOENT; 958 } 959 960 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 961 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 962 963 abort_req->opc = SPDK_NVME_OPC_ABORT; 964 abort_req->cb_fn = cb_fn; 965 abort_req->cb_arg = cb_arg; 966 967 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 968 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 969 abort_req->cpl.cdw0 = 0; 970 971 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 972 ctrlr->adminq.num_outstanding_reqs++; 973 974 return 0; 975 } 976 977 int32_t 978 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 979 { 980 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 981 } 982 983 uint32_t 984 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 985 { 986 return ns->id; 987 } 988 989 struct spdk_nvme_ctrlr * 990 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 991 { 992 return ns->ctrlr; 993 } 994 995 static inline struct spdk_nvme_ns_data * 996 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 997 { 998 return &ns->ctrlr->nsdata[ns->id - 1]; 999 } 1000 1001 const struct spdk_nvme_ns_data * 1002 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 1003 { 1004 return _nvme_ns_get_data(ns); 1005 } 1006 1007 uint64_t 1008 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 1009 { 1010 return _nvme_ns_get_data(ns)->nsze; 1011 } 1012 1013 const struct spdk_uuid * 1014 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 1015 { 1016 return ns->uuid; 1017 } 1018 1019 enum spdk_nvme_csi 1020 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 1021 return ns->csi; 1022 } 1023 1024 int 1025 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1026 void *metadata, uint64_t lba, uint32_t lba_count, 1027 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1028 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1029 { 1030 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1031 } 1032 1033 int 1034 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1035 void *buffer, void *metadata, uint64_t lba, 1036 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1037 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1038 { 1039 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1040 } 1041 1042 int 1043 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1044 uint64_t lba, uint32_t lba_count, 1045 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1046 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1047 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1048 uint16_t apptag_mask, uint16_t apptag) 1049 { 1050 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1051 } 1052 1053 int 1054 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1055 uint64_t lba, uint32_t lba_count, 1056 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1057 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1058 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1059 uint16_t apptag_mask, uint16_t apptag) 1060 { 1061 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1062 } 1063 1064 static bool g_ut_readv_ext_called; 1065 int 1066 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1067 uint64_t lba, uint32_t lba_count, 1068 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1069 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1070 spdk_nvme_req_next_sge_cb next_sge_fn, 1071 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1072 { 1073 g_ut_readv_ext_called = true; 1074 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1075 } 1076 1077 static bool g_ut_read_ext_called; 1078 int 1079 spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1080 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1081 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1082 { 1083 g_ut_read_ext_called = true; 1084 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1085 } 1086 1087 static bool g_ut_writev_ext_called; 1088 int 1089 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1090 uint64_t lba, uint32_t lba_count, 1091 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1092 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1093 spdk_nvme_req_next_sge_cb next_sge_fn, 1094 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1095 { 1096 g_ut_writev_ext_called = true; 1097 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1098 } 1099 1100 static bool g_ut_write_ext_called; 1101 int 1102 spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1103 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1104 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1105 { 1106 g_ut_write_ext_called = true; 1107 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1108 } 1109 1110 int 1111 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1112 uint64_t lba, uint32_t lba_count, 1113 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1114 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1115 spdk_nvme_req_next_sge_cb next_sge_fn, 1116 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1117 { 1118 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1119 } 1120 1121 int 1122 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1123 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1124 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1125 { 1126 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1127 } 1128 1129 int 1130 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1131 uint64_t lba, uint32_t lba_count, 1132 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1133 uint32_t io_flags) 1134 { 1135 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1136 } 1137 1138 int 1139 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1140 const struct spdk_nvme_scc_source_range *ranges, 1141 uint16_t num_ranges, uint64_t dest_lba, 1142 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1143 { 1144 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1145 } 1146 1147 struct spdk_nvme_poll_group * 1148 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1149 { 1150 struct spdk_nvme_poll_group *group; 1151 1152 group = calloc(1, sizeof(*group)); 1153 if (group == NULL) { 1154 return NULL; 1155 } 1156 1157 group->ctx = ctx; 1158 if (table != NULL) { 1159 group->accel_fn_table = *table; 1160 } 1161 TAILQ_INIT(&group->connected_qpairs); 1162 TAILQ_INIT(&group->disconnected_qpairs); 1163 1164 return group; 1165 } 1166 1167 int 1168 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1169 { 1170 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1171 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1172 return -EBUSY; 1173 } 1174 1175 free(group); 1176 1177 return 0; 1178 } 1179 1180 spdk_nvme_qp_failure_reason 1181 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1182 { 1183 return qpair->failure_reason; 1184 } 1185 1186 bool 1187 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair) 1188 { 1189 return qpair->is_connected; 1190 } 1191 1192 int32_t 1193 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1194 uint32_t max_completions) 1195 { 1196 struct ut_nvme_req *req, *tmp; 1197 uint32_t num_completions = 0; 1198 1199 if (!qpair->is_connected) { 1200 return -ENXIO; 1201 } 1202 1203 qpair->in_completion_context = true; 1204 1205 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1206 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1207 qpair->num_outstanding_reqs--; 1208 1209 req->cb_fn(req->cb_arg, &req->cpl); 1210 1211 free(req); 1212 num_completions++; 1213 } 1214 1215 qpair->in_completion_context = false; 1216 if (qpair->delete_after_completion_context) { 1217 spdk_nvme_ctrlr_free_io_qpair(qpair); 1218 } 1219 1220 return num_completions; 1221 } 1222 1223 int64_t 1224 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1225 uint32_t completions_per_qpair, 1226 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1227 { 1228 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1229 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1230 1231 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1232 1233 if (disconnected_qpair_cb == NULL) { 1234 return -EINVAL; 1235 } 1236 1237 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1238 disconnected_qpair_cb(qpair, group->ctx); 1239 } 1240 1241 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1242 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1243 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1244 /* Bump the number of completions so this counts as "busy" */ 1245 num_completions++; 1246 continue; 1247 } 1248 1249 local_completions = spdk_nvme_qpair_process_completions(qpair, 1250 completions_per_qpair); 1251 if (local_completions < 0 && error_reason == 0) { 1252 error_reason = local_completions; 1253 } else { 1254 num_completions += local_completions; 1255 assert(num_completions >= 0); 1256 } 1257 } 1258 1259 return error_reason ? error_reason : num_completions; 1260 } 1261 1262 int 1263 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1264 struct spdk_nvme_qpair *qpair) 1265 { 1266 CU_ASSERT(!qpair->is_connected); 1267 1268 qpair->poll_group = group; 1269 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1270 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1271 1272 return 0; 1273 } 1274 1275 int 1276 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1277 struct spdk_nvme_qpair *qpair) 1278 { 1279 CU_ASSERT(!qpair->is_connected); 1280 1281 if (qpair->poll_group == NULL) { 1282 return -ENOENT; 1283 } 1284 1285 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1286 1287 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1288 1289 qpair->poll_group = NULL; 1290 qpair->poll_group_tailq_head = NULL; 1291 1292 return 0; 1293 } 1294 1295 int 1296 spdk_bdev_register(struct spdk_bdev *bdev) 1297 { 1298 g_ut_registered_bdev = bdev; 1299 1300 return g_ut_register_bdev_status; 1301 } 1302 1303 void 1304 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1305 { 1306 int rc; 1307 1308 rc = bdev->fn_table->destruct(bdev->ctxt); 1309 1310 if (bdev == g_ut_registered_bdev) { 1311 g_ut_registered_bdev = NULL; 1312 } 1313 1314 if (rc <= 0 && cb_fn != NULL) { 1315 cb_fn(cb_arg, rc); 1316 } 1317 } 1318 1319 int 1320 spdk_bdev_open_ext(const char *bdev_name, bool write, 1321 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1322 struct spdk_bdev_desc **desc) 1323 { 1324 if (g_ut_registered_bdev == NULL || 1325 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1326 return -ENODEV; 1327 } 1328 1329 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1330 1331 return 0; 1332 } 1333 1334 struct spdk_bdev * 1335 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1336 { 1337 return (struct spdk_bdev *)desc; 1338 } 1339 1340 int 1341 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1342 { 1343 bdev->blockcnt = size; 1344 1345 return 0; 1346 } 1347 1348 struct spdk_io_channel * 1349 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1350 { 1351 return (struct spdk_io_channel *)bdev_io->internal.ch; 1352 } 1353 1354 struct spdk_thread * 1355 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io) 1356 { 1357 return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io)); 1358 } 1359 1360 void 1361 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1362 { 1363 bdev_io->internal.status = status; 1364 bdev_io->internal.in_submit_request = false; 1365 } 1366 1367 void 1368 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1369 { 1370 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1371 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1372 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1373 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1374 } else { 1375 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1376 } 1377 1378 bdev_io->internal.error.nvme.cdw0 = cdw0; 1379 bdev_io->internal.error.nvme.sct = sct; 1380 bdev_io->internal.error.nvme.sc = sc; 1381 1382 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1383 } 1384 1385 void 1386 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1387 { 1388 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1389 1390 ut_bdev_io_set_buf(bdev_io); 1391 1392 cb(ch, bdev_io, true); 1393 } 1394 1395 static void 1396 test_create_ctrlr(void) 1397 { 1398 struct spdk_nvme_transport_id trid = {}; 1399 struct spdk_nvme_ctrlr ctrlr = {}; 1400 int rc; 1401 1402 ut_init_trid(&trid); 1403 1404 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1405 CU_ASSERT(rc == 0); 1406 1407 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1408 1409 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1410 CU_ASSERT(rc == 0); 1411 1412 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1413 1414 poll_threads(); 1415 spdk_delay_us(1000); 1416 poll_threads(); 1417 1418 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1419 } 1420 1421 static void 1422 ut_check_hotplug_on_reset(void *cb_arg, int rc) 1423 { 1424 bool *detect_remove = cb_arg; 1425 1426 CU_ASSERT(rc != 0); 1427 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1428 1429 *detect_remove = true; 1430 } 1431 1432 static void 1433 test_reset_ctrlr(void) 1434 { 1435 struct spdk_nvme_transport_id trid = {}; 1436 struct spdk_nvme_ctrlr ctrlr = {}; 1437 struct nvme_ctrlr *nvme_ctrlr = NULL; 1438 struct nvme_path_id *curr_trid; 1439 struct spdk_io_channel *ch1, *ch2; 1440 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1441 bool detect_remove; 1442 int rc; 1443 1444 ut_init_trid(&trid); 1445 TAILQ_INIT(&ctrlr.active_io_qpairs); 1446 1447 set_thread(0); 1448 1449 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1450 CU_ASSERT(rc == 0); 1451 1452 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1453 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1454 1455 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1456 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1457 1458 ch1 = spdk_get_io_channel(nvme_ctrlr); 1459 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1460 1461 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1462 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1463 1464 set_thread(1); 1465 1466 ch2 = spdk_get_io_channel(nvme_ctrlr); 1467 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1468 1469 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1470 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1471 1472 /* Reset starts from thread 1. */ 1473 set_thread(1); 1474 1475 /* Case 1: ctrlr is already being destructed. */ 1476 nvme_ctrlr->destruct = true; 1477 1478 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1479 CU_ASSERT(rc == -ENXIO); 1480 1481 /* Case 2: reset is in progress. */ 1482 nvme_ctrlr->destruct = false; 1483 nvme_ctrlr->resetting = true; 1484 1485 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1486 CU_ASSERT(rc == -EBUSY); 1487 1488 /* Case 3: reset completes successfully. */ 1489 nvme_ctrlr->resetting = false; 1490 curr_trid->last_failed_tsc = spdk_get_ticks(); 1491 ctrlr.is_failed = true; 1492 1493 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1494 CU_ASSERT(rc == 0); 1495 CU_ASSERT(nvme_ctrlr->resetting == true); 1496 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1497 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1498 1499 poll_thread_times(0, 3); 1500 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1501 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1502 1503 poll_thread_times(0, 1); 1504 poll_thread_times(1, 1); 1505 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1506 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1507 CU_ASSERT(ctrlr.is_failed == true); 1508 1509 poll_thread_times(1, 1); 1510 poll_thread_times(0, 1); 1511 CU_ASSERT(ctrlr.is_failed == false); 1512 CU_ASSERT(ctrlr.adminq.is_connected == false); 1513 1514 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1515 poll_thread_times(0, 2); 1516 CU_ASSERT(ctrlr.adminq.is_connected == true); 1517 1518 poll_thread_times(0, 1); 1519 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1520 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1521 1522 poll_thread_times(1, 1); 1523 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1524 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1525 CU_ASSERT(nvme_ctrlr->resetting == true); 1526 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1527 1528 poll_thread_times(0, 2); 1529 CU_ASSERT(nvme_ctrlr->resetting == true); 1530 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1531 poll_thread_times(1, 1); 1532 CU_ASSERT(nvme_ctrlr->resetting == true); 1533 poll_thread_times(0, 1); 1534 CU_ASSERT(nvme_ctrlr->resetting == false); 1535 1536 /* Case 4: ctrlr is already removed. */ 1537 ctrlr.is_removed = true; 1538 1539 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1540 CU_ASSERT(rc == 0); 1541 1542 detect_remove = false; 1543 nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset; 1544 nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove; 1545 1546 poll_threads(); 1547 1548 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL); 1549 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL); 1550 CU_ASSERT(detect_remove == true); 1551 1552 ctrlr.is_removed = false; 1553 1554 spdk_put_io_channel(ch2); 1555 1556 set_thread(0); 1557 1558 spdk_put_io_channel(ch1); 1559 1560 poll_threads(); 1561 1562 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1563 CU_ASSERT(rc == 0); 1564 1565 poll_threads(); 1566 spdk_delay_us(1000); 1567 poll_threads(); 1568 1569 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1570 } 1571 1572 static void 1573 test_race_between_reset_and_destruct_ctrlr(void) 1574 { 1575 struct spdk_nvme_transport_id trid = {}; 1576 struct spdk_nvme_ctrlr ctrlr = {}; 1577 struct nvme_ctrlr *nvme_ctrlr; 1578 struct spdk_io_channel *ch1, *ch2; 1579 int rc; 1580 1581 ut_init_trid(&trid); 1582 TAILQ_INIT(&ctrlr.active_io_qpairs); 1583 1584 set_thread(0); 1585 1586 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1587 CU_ASSERT(rc == 0); 1588 1589 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1590 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1591 1592 ch1 = spdk_get_io_channel(nvme_ctrlr); 1593 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1594 1595 set_thread(1); 1596 1597 ch2 = spdk_get_io_channel(nvme_ctrlr); 1598 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1599 1600 /* Reset starts from thread 1. */ 1601 set_thread(1); 1602 1603 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1604 CU_ASSERT(rc == 0); 1605 CU_ASSERT(nvme_ctrlr->resetting == true); 1606 1607 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1608 set_thread(0); 1609 1610 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1611 CU_ASSERT(rc == 0); 1612 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1613 CU_ASSERT(nvme_ctrlr->destruct == true); 1614 CU_ASSERT(nvme_ctrlr->resetting == true); 1615 1616 poll_threads(); 1617 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1618 poll_threads(); 1619 1620 /* Reset completed but ctrlr is not still destructed yet. */ 1621 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1622 CU_ASSERT(nvme_ctrlr->destruct == true); 1623 CU_ASSERT(nvme_ctrlr->resetting == false); 1624 1625 /* New reset request is rejected. */ 1626 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1627 CU_ASSERT(rc == -ENXIO); 1628 1629 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1630 * However there are two channels and destruct is not completed yet. 1631 */ 1632 poll_threads(); 1633 1634 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1635 1636 set_thread(0); 1637 1638 spdk_put_io_channel(ch1); 1639 1640 set_thread(1); 1641 1642 spdk_put_io_channel(ch2); 1643 1644 poll_threads(); 1645 spdk_delay_us(1000); 1646 poll_threads(); 1647 1648 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1649 } 1650 1651 static void 1652 test_failover_ctrlr(void) 1653 { 1654 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1655 struct spdk_nvme_ctrlr ctrlr = {}; 1656 struct nvme_ctrlr *nvme_ctrlr = NULL; 1657 struct nvme_path_id *curr_trid, *next_trid; 1658 struct spdk_io_channel *ch1, *ch2; 1659 int rc; 1660 1661 ut_init_trid(&trid1); 1662 ut_init_trid2(&trid2); 1663 TAILQ_INIT(&ctrlr.active_io_qpairs); 1664 1665 set_thread(0); 1666 1667 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1668 CU_ASSERT(rc == 0); 1669 1670 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1671 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1672 1673 ch1 = spdk_get_io_channel(nvme_ctrlr); 1674 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1675 1676 set_thread(1); 1677 1678 ch2 = spdk_get_io_channel(nvme_ctrlr); 1679 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1680 1681 /* First, test one trid case. */ 1682 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1683 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1684 1685 /* Failover starts from thread 1. */ 1686 set_thread(1); 1687 1688 /* Case 1: ctrlr is already being destructed. */ 1689 nvme_ctrlr->destruct = true; 1690 1691 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1692 CU_ASSERT(rc == -ENXIO); 1693 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1694 1695 /* Case 2: reset is in progress. */ 1696 nvme_ctrlr->destruct = false; 1697 nvme_ctrlr->resetting = true; 1698 1699 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1700 CU_ASSERT(rc == -EINPROGRESS); 1701 1702 /* Case 3: reset completes successfully. */ 1703 nvme_ctrlr->resetting = false; 1704 1705 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1706 CU_ASSERT(rc == 0); 1707 1708 CU_ASSERT(nvme_ctrlr->resetting == true); 1709 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1710 1711 poll_threads(); 1712 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1713 poll_threads(); 1714 1715 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1716 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1717 1718 CU_ASSERT(nvme_ctrlr->resetting == false); 1719 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1720 1721 set_thread(0); 1722 1723 /* Second, test two trids case. */ 1724 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1725 CU_ASSERT(rc == 0); 1726 1727 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1728 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1729 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1730 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1731 1732 /* Failover starts from thread 1. */ 1733 set_thread(1); 1734 1735 /* Case 4: reset is in progress. */ 1736 nvme_ctrlr->resetting = true; 1737 1738 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1739 CU_ASSERT(rc == -EINPROGRESS); 1740 1741 /* Case 5: failover completes successfully. */ 1742 nvme_ctrlr->resetting = false; 1743 1744 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1745 CU_ASSERT(rc == 0); 1746 1747 CU_ASSERT(nvme_ctrlr->resetting == true); 1748 1749 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1750 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1751 CU_ASSERT(next_trid != curr_trid); 1752 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1753 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1754 1755 poll_threads(); 1756 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1757 poll_threads(); 1758 1759 CU_ASSERT(nvme_ctrlr->resetting == false); 1760 1761 spdk_put_io_channel(ch2); 1762 1763 set_thread(0); 1764 1765 spdk_put_io_channel(ch1); 1766 1767 poll_threads(); 1768 1769 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1770 CU_ASSERT(rc == 0); 1771 1772 poll_threads(); 1773 spdk_delay_us(1000); 1774 poll_threads(); 1775 1776 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1777 } 1778 1779 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1780 * 1781 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1782 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1783 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1784 * have been active, i.e., the head of the list until the failover completed. 1785 * However trid3 was inserted to the head of the list by mistake. 1786 * 1787 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1788 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1789 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1790 * may be executed repeatedly before failover is executed. Hence this bug is real. 1791 * 1792 * The following test verifies the fix. 1793 */ 1794 static void 1795 test_race_between_failover_and_add_secondary_trid(void) 1796 { 1797 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1798 struct spdk_nvme_ctrlr ctrlr = {}; 1799 struct nvme_ctrlr *nvme_ctrlr = NULL; 1800 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1801 struct spdk_io_channel *ch1, *ch2; 1802 int rc; 1803 1804 ut_init_trid(&trid1); 1805 ut_init_trid2(&trid2); 1806 ut_init_trid3(&trid3); 1807 TAILQ_INIT(&ctrlr.active_io_qpairs); 1808 1809 set_thread(0); 1810 1811 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1812 CU_ASSERT(rc == 0); 1813 1814 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1815 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1816 1817 ch1 = spdk_get_io_channel(nvme_ctrlr); 1818 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1819 1820 set_thread(1); 1821 1822 ch2 = spdk_get_io_channel(nvme_ctrlr); 1823 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1824 1825 set_thread(0); 1826 1827 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1828 CU_ASSERT(rc == 0); 1829 1830 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1831 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1832 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1833 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1834 path_id2 = TAILQ_NEXT(path_id1, link); 1835 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1836 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1837 1838 ctrlr.fail_reset = true; 1839 1840 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1841 CU_ASSERT(rc == 0); 1842 1843 poll_threads(); 1844 1845 CU_ASSERT(path_id1->last_failed_tsc != 0); 1846 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1847 1848 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1849 CU_ASSERT(rc == 0); 1850 1851 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1852 CU_ASSERT(rc == 0); 1853 1854 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1855 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1856 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1857 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1858 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1859 path_id3 = TAILQ_NEXT(path_id2, link); 1860 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1861 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1862 1863 poll_threads(); 1864 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1865 poll_threads(); 1866 1867 spdk_put_io_channel(ch1); 1868 1869 set_thread(1); 1870 1871 spdk_put_io_channel(ch2); 1872 1873 poll_threads(); 1874 1875 set_thread(0); 1876 1877 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1878 CU_ASSERT(rc == 0); 1879 1880 poll_threads(); 1881 spdk_delay_us(1000); 1882 poll_threads(); 1883 1884 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1885 } 1886 1887 static void 1888 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1889 { 1890 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1891 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1892 } 1893 1894 static void 1895 test_pending_reset(void) 1896 { 1897 struct spdk_nvme_transport_id trid = {}; 1898 struct spdk_nvme_ctrlr *ctrlr; 1899 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 1900 struct nvme_ctrlr *nvme_ctrlr = NULL; 1901 const int STRING_SIZE = 32; 1902 const char *attached_names[STRING_SIZE]; 1903 struct nvme_bdev *bdev; 1904 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1905 struct spdk_io_channel *ch1, *ch2; 1906 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1907 struct nvme_io_path *io_path1, *io_path2; 1908 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1909 int rc; 1910 1911 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1912 ut_init_trid(&trid); 1913 1914 set_thread(0); 1915 1916 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1917 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1918 1919 g_ut_attach_ctrlr_status = 0; 1920 g_ut_attach_bdev_count = 1; 1921 1922 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1923 attach_ctrlr_done, NULL, &opts, NULL, false); 1924 CU_ASSERT(rc == 0); 1925 1926 spdk_delay_us(1000); 1927 poll_threads(); 1928 1929 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1930 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1931 1932 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1933 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1934 1935 ch1 = spdk_get_io_channel(bdev); 1936 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1937 1938 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1939 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1940 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1941 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1942 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1943 1944 set_thread(1); 1945 1946 ch2 = spdk_get_io_channel(bdev); 1947 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1948 1949 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1950 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1951 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1952 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1953 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1954 1955 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1956 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1957 1958 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1959 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1960 1961 /* The first reset request is submitted on thread 1, and the second reset request 1962 * is submitted on thread 0 while processing the first request. 1963 */ 1964 bdev_nvme_submit_request(ch2, first_bdev_io); 1965 CU_ASSERT(nvme_ctrlr->resetting == true); 1966 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1967 1968 set_thread(0); 1969 1970 bdev_nvme_submit_request(ch1, second_bdev_io); 1971 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1972 1973 poll_threads(); 1974 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1975 poll_threads(); 1976 1977 CU_ASSERT(nvme_ctrlr->resetting == false); 1978 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1979 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1980 1981 /* The first reset request is submitted on thread 1, and the second reset request 1982 * is submitted on thread 0 while processing the first request. 1983 * 1984 * The difference from the above scenario is that the controller is removed while 1985 * processing the first request. Hence both reset requests should fail. 1986 */ 1987 set_thread(1); 1988 1989 bdev_nvme_submit_request(ch2, first_bdev_io); 1990 CU_ASSERT(nvme_ctrlr->resetting == true); 1991 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1992 1993 set_thread(0); 1994 1995 bdev_nvme_submit_request(ch1, second_bdev_io); 1996 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1997 1998 ctrlr->fail_reset = true; 1999 2000 poll_threads(); 2001 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2002 poll_threads(); 2003 2004 CU_ASSERT(nvme_ctrlr->resetting == false); 2005 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2006 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2007 2008 spdk_put_io_channel(ch1); 2009 2010 set_thread(1); 2011 2012 spdk_put_io_channel(ch2); 2013 2014 poll_threads(); 2015 2016 set_thread(0); 2017 2018 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2019 CU_ASSERT(rc == 0); 2020 2021 poll_threads(); 2022 spdk_delay_us(1000); 2023 poll_threads(); 2024 2025 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2026 2027 free(first_bdev_io); 2028 free(second_bdev_io); 2029 } 2030 2031 static void 2032 test_attach_ctrlr(void) 2033 { 2034 struct spdk_nvme_transport_id trid = {}; 2035 struct spdk_nvme_ctrlr *ctrlr; 2036 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2037 struct nvme_ctrlr *nvme_ctrlr; 2038 const int STRING_SIZE = 32; 2039 const char *attached_names[STRING_SIZE]; 2040 struct nvme_bdev *nbdev; 2041 int rc; 2042 2043 set_thread(0); 2044 2045 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2046 ut_init_trid(&trid); 2047 2048 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2049 * by probe polling. 2050 */ 2051 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2052 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2053 2054 ctrlr->is_failed = true; 2055 g_ut_attach_ctrlr_status = -EIO; 2056 g_ut_attach_bdev_count = 0; 2057 2058 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2059 attach_ctrlr_done, NULL, &opts, NULL, false); 2060 CU_ASSERT(rc == 0); 2061 2062 spdk_delay_us(1000); 2063 poll_threads(); 2064 2065 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2066 2067 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2068 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2069 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2070 2071 g_ut_attach_ctrlr_status = 0; 2072 2073 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2074 attach_ctrlr_done, NULL, &opts, NULL, false); 2075 CU_ASSERT(rc == 0); 2076 2077 spdk_delay_us(1000); 2078 poll_threads(); 2079 2080 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2081 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2082 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2083 2084 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2085 CU_ASSERT(rc == 0); 2086 2087 poll_threads(); 2088 spdk_delay_us(1000); 2089 poll_threads(); 2090 2091 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2092 2093 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2094 * one nvme_bdev is created. 2095 */ 2096 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2097 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2098 2099 g_ut_attach_bdev_count = 1; 2100 2101 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2102 attach_ctrlr_done, NULL, &opts, NULL, false); 2103 CU_ASSERT(rc == 0); 2104 2105 spdk_delay_us(1000); 2106 poll_threads(); 2107 2108 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2109 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2110 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2111 2112 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2113 attached_names[0] = NULL; 2114 2115 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2116 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2117 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2118 2119 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2120 CU_ASSERT(rc == 0); 2121 2122 poll_threads(); 2123 spdk_delay_us(1000); 2124 poll_threads(); 2125 2126 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2127 2128 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2129 * created because creating one nvme_bdev failed. 2130 */ 2131 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2132 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2133 2134 g_ut_register_bdev_status = -EINVAL; 2135 g_ut_attach_bdev_count = 0; 2136 2137 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2138 attach_ctrlr_done, NULL, &opts, NULL, false); 2139 CU_ASSERT(rc == 0); 2140 2141 spdk_delay_us(1000); 2142 poll_threads(); 2143 2144 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2145 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2146 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2147 2148 CU_ASSERT(attached_names[0] == NULL); 2149 2150 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2151 CU_ASSERT(rc == 0); 2152 2153 poll_threads(); 2154 spdk_delay_us(1000); 2155 poll_threads(); 2156 2157 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2158 2159 g_ut_register_bdev_status = 0; 2160 } 2161 2162 static void 2163 test_aer_cb(void) 2164 { 2165 struct spdk_nvme_transport_id trid = {}; 2166 struct spdk_nvme_ctrlr *ctrlr; 2167 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2168 struct nvme_ctrlr *nvme_ctrlr; 2169 struct nvme_bdev *bdev; 2170 const int STRING_SIZE = 32; 2171 const char *attached_names[STRING_SIZE]; 2172 union spdk_nvme_async_event_completion event = {}; 2173 struct spdk_nvme_cpl cpl = {}; 2174 int rc; 2175 2176 set_thread(0); 2177 2178 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2179 ut_init_trid(&trid); 2180 2181 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2182 * namespaces are populated. 2183 */ 2184 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2185 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2186 2187 ctrlr->ns[0].is_active = false; 2188 2189 g_ut_attach_ctrlr_status = 0; 2190 g_ut_attach_bdev_count = 3; 2191 2192 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2193 attach_ctrlr_done, NULL, &opts, NULL, false); 2194 CU_ASSERT(rc == 0); 2195 2196 spdk_delay_us(1000); 2197 poll_threads(); 2198 2199 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2200 poll_threads(); 2201 2202 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2203 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2204 2205 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2206 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2207 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2208 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2209 2210 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2211 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2212 CU_ASSERT(bdev->disk.blockcnt == 1024); 2213 2214 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2215 * change the size of the 4th namespace. 2216 */ 2217 ctrlr->ns[0].is_active = true; 2218 ctrlr->ns[2].is_active = false; 2219 ctrlr->nsdata[3].nsze = 2048; 2220 2221 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2222 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2223 cpl.cdw0 = event.raw; 2224 2225 aer_cb(nvme_ctrlr, &cpl); 2226 2227 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2228 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2229 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2230 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2231 CU_ASSERT(bdev->disk.blockcnt == 2048); 2232 2233 /* Change ANA state of active namespaces. */ 2234 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2235 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2236 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2237 2238 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2239 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2240 cpl.cdw0 = event.raw; 2241 2242 aer_cb(nvme_ctrlr, &cpl); 2243 2244 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2245 poll_threads(); 2246 2247 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2248 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2249 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2250 2251 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2252 CU_ASSERT(rc == 0); 2253 2254 poll_threads(); 2255 spdk_delay_us(1000); 2256 poll_threads(); 2257 2258 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2259 } 2260 2261 static void 2262 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2263 enum spdk_bdev_io_type io_type) 2264 { 2265 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2266 struct nvme_io_path *io_path; 2267 struct spdk_nvme_qpair *qpair; 2268 2269 io_path = bdev_nvme_find_io_path(nbdev_ch); 2270 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2271 qpair = io_path->qpair->qpair; 2272 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2273 2274 bdev_io->type = io_type; 2275 bdev_io->internal.in_submit_request = true; 2276 2277 bdev_nvme_submit_request(ch, bdev_io); 2278 2279 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2280 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2281 2282 poll_threads(); 2283 2284 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2285 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2286 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2287 } 2288 2289 static void 2290 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2291 enum spdk_bdev_io_type io_type) 2292 { 2293 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2294 struct nvme_io_path *io_path; 2295 struct spdk_nvme_qpair *qpair; 2296 2297 io_path = bdev_nvme_find_io_path(nbdev_ch); 2298 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2299 qpair = io_path->qpair->qpair; 2300 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2301 2302 bdev_io->type = io_type; 2303 bdev_io->internal.in_submit_request = true; 2304 2305 bdev_nvme_submit_request(ch, bdev_io); 2306 2307 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2308 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2309 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2310 } 2311 2312 static void 2313 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2314 { 2315 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2316 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2317 struct ut_nvme_req *req; 2318 struct nvme_io_path *io_path; 2319 struct spdk_nvme_qpair *qpair; 2320 2321 io_path = bdev_nvme_find_io_path(nbdev_ch); 2322 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2323 qpair = io_path->qpair->qpair; 2324 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2325 2326 /* Only compare and write now. */ 2327 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2328 bdev_io->internal.in_submit_request = true; 2329 2330 bdev_nvme_submit_request(ch, bdev_io); 2331 2332 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2333 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2334 CU_ASSERT(bio->first_fused_submitted == true); 2335 2336 /* First outstanding request is compare operation. */ 2337 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2338 SPDK_CU_ASSERT_FATAL(req != NULL); 2339 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2340 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2341 2342 poll_threads(); 2343 2344 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2345 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2346 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2347 } 2348 2349 static void 2350 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2351 struct spdk_nvme_ctrlr *ctrlr) 2352 { 2353 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2354 bdev_io->internal.in_submit_request = true; 2355 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2356 2357 bdev_nvme_submit_request(ch, bdev_io); 2358 2359 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2360 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2361 2362 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2363 poll_thread_times(1, 1); 2364 2365 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2366 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2367 2368 poll_thread_times(0, 1); 2369 2370 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2371 } 2372 2373 static void 2374 test_submit_nvme_cmd(void) 2375 { 2376 struct spdk_nvme_transport_id trid = {}; 2377 struct spdk_nvme_ctrlr *ctrlr; 2378 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2379 struct nvme_ctrlr *nvme_ctrlr; 2380 const int STRING_SIZE = 32; 2381 const char *attached_names[STRING_SIZE]; 2382 struct nvme_bdev *bdev; 2383 struct spdk_bdev_io *bdev_io; 2384 struct spdk_io_channel *ch; 2385 int rc; 2386 2387 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2388 ut_init_trid(&trid); 2389 2390 set_thread(1); 2391 2392 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2393 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2394 2395 g_ut_attach_ctrlr_status = 0; 2396 g_ut_attach_bdev_count = 1; 2397 2398 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2399 attach_ctrlr_done, NULL, &opts, NULL, false); 2400 CU_ASSERT(rc == 0); 2401 2402 spdk_delay_us(1000); 2403 poll_threads(); 2404 2405 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2406 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2407 2408 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2409 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2410 2411 set_thread(0); 2412 2413 ch = spdk_get_io_channel(bdev); 2414 SPDK_CU_ASSERT_FATAL(ch != NULL); 2415 2416 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2417 2418 bdev_io->u.bdev.iovs = NULL; 2419 2420 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2421 2422 ut_bdev_io_set_buf(bdev_io); 2423 2424 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2425 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2426 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2427 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2428 2429 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2430 2431 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2432 2433 /* Verify that ext NVME API is called when data is described by memory domain */ 2434 g_ut_read_ext_called = false; 2435 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2436 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2437 CU_ASSERT(g_ut_read_ext_called == true); 2438 g_ut_read_ext_called = false; 2439 bdev_io->u.bdev.memory_domain = NULL; 2440 2441 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2442 2443 free(bdev_io); 2444 2445 spdk_put_io_channel(ch); 2446 2447 poll_threads(); 2448 2449 set_thread(1); 2450 2451 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2452 CU_ASSERT(rc == 0); 2453 2454 poll_threads(); 2455 spdk_delay_us(1000); 2456 poll_threads(); 2457 2458 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2459 } 2460 2461 static void 2462 test_add_remove_trid(void) 2463 { 2464 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2465 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2466 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2467 struct nvme_ctrlr *nvme_ctrlr = NULL; 2468 const int STRING_SIZE = 32; 2469 const char *attached_names[STRING_SIZE]; 2470 struct nvme_path_id *ctrid; 2471 int rc; 2472 2473 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2474 ut_init_trid(&path1.trid); 2475 ut_init_trid2(&path2.trid); 2476 ut_init_trid3(&path3.trid); 2477 2478 set_thread(0); 2479 2480 g_ut_attach_ctrlr_status = 0; 2481 g_ut_attach_bdev_count = 0; 2482 2483 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2484 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2485 2486 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2487 attach_ctrlr_done, NULL, &opts, NULL, false); 2488 CU_ASSERT(rc == 0); 2489 2490 spdk_delay_us(1000); 2491 poll_threads(); 2492 2493 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2494 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2495 2496 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2497 2498 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2499 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2500 2501 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2502 attach_ctrlr_done, NULL, &opts, NULL, false); 2503 CU_ASSERT(rc == 0); 2504 2505 spdk_delay_us(1000); 2506 poll_threads(); 2507 2508 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2509 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2510 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2511 break; 2512 } 2513 } 2514 CU_ASSERT(ctrid != NULL); 2515 2516 /* trid3 is not in the registered list. */ 2517 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2518 CU_ASSERT(rc == -ENXIO); 2519 2520 /* trid2 is not used, and simply removed. */ 2521 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2522 CU_ASSERT(rc == 0); 2523 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2524 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2525 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2526 } 2527 2528 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2529 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2530 2531 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2532 attach_ctrlr_done, NULL, &opts, NULL, false); 2533 CU_ASSERT(rc == 0); 2534 2535 spdk_delay_us(1000); 2536 poll_threads(); 2537 2538 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2539 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2540 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2541 break; 2542 } 2543 } 2544 CU_ASSERT(ctrid != NULL); 2545 2546 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2547 * If we add path2 again, path2 should be inserted between path1 and path3. 2548 * Then, we remove path2. It is not used, and simply removed. 2549 */ 2550 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2551 2552 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2553 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2554 2555 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2556 attach_ctrlr_done, NULL, &opts, NULL, false); 2557 CU_ASSERT(rc == 0); 2558 2559 spdk_delay_us(1000); 2560 poll_threads(); 2561 2562 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2563 2564 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2565 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2566 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2567 2568 ctrid = TAILQ_NEXT(ctrid, link); 2569 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2570 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2571 2572 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2573 CU_ASSERT(rc == 0); 2574 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2575 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2576 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2577 } 2578 2579 /* path1 is currently used and path3 is an alternative path. 2580 * If we remove path1, path is changed to path3. 2581 */ 2582 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2583 CU_ASSERT(rc == 0); 2584 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2585 CU_ASSERT(nvme_ctrlr->resetting == true); 2586 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2587 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2588 } 2589 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2590 2591 poll_threads(); 2592 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2593 poll_threads(); 2594 2595 CU_ASSERT(nvme_ctrlr->resetting == false); 2596 2597 /* path3 is the current and only path. If we remove path3, the corresponding 2598 * nvme_ctrlr is removed. 2599 */ 2600 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2601 CU_ASSERT(rc == 0); 2602 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2603 2604 poll_threads(); 2605 spdk_delay_us(1000); 2606 poll_threads(); 2607 2608 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2609 2610 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2611 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2612 2613 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2614 attach_ctrlr_done, NULL, &opts, NULL, false); 2615 CU_ASSERT(rc == 0); 2616 2617 spdk_delay_us(1000); 2618 poll_threads(); 2619 2620 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2621 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2622 2623 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2624 2625 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2626 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2627 2628 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2629 attach_ctrlr_done, NULL, &opts, NULL, false); 2630 CU_ASSERT(rc == 0); 2631 2632 spdk_delay_us(1000); 2633 poll_threads(); 2634 2635 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2636 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2637 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2638 break; 2639 } 2640 } 2641 CU_ASSERT(ctrid != NULL); 2642 2643 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2644 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2645 CU_ASSERT(rc == 0); 2646 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2647 2648 poll_threads(); 2649 spdk_delay_us(1000); 2650 poll_threads(); 2651 2652 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2653 } 2654 2655 static void 2656 test_abort(void) 2657 { 2658 struct spdk_nvme_transport_id trid = {}; 2659 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 2660 struct spdk_nvme_ctrlr *ctrlr; 2661 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 2662 struct nvme_ctrlr *nvme_ctrlr; 2663 const int STRING_SIZE = 32; 2664 const char *attached_names[STRING_SIZE]; 2665 struct nvme_bdev *bdev; 2666 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2667 struct spdk_io_channel *ch1, *ch2; 2668 struct nvme_bdev_channel *nbdev_ch1; 2669 struct nvme_io_path *io_path1; 2670 struct nvme_qpair *nvme_qpair1; 2671 int rc; 2672 2673 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2674 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2675 * are submitted on thread 1. Both should succeed. 2676 */ 2677 2678 ut_init_trid(&trid); 2679 2680 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2681 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2682 2683 g_ut_attach_ctrlr_status = 0; 2684 g_ut_attach_bdev_count = 1; 2685 2686 set_thread(1); 2687 2688 opts.ctrlr_loss_timeout_sec = -1; 2689 opts.reconnect_delay_sec = 1; 2690 2691 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2692 attach_ctrlr_done, NULL, &dopts, &opts, false); 2693 CU_ASSERT(rc == 0); 2694 2695 spdk_delay_us(1000); 2696 poll_threads(); 2697 2698 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2699 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2700 2701 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2702 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2703 2704 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2705 ut_bdev_io_set_buf(write_io); 2706 2707 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2708 ut_bdev_io_set_buf(fuse_io); 2709 2710 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2711 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2712 2713 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2714 2715 set_thread(0); 2716 2717 ch1 = spdk_get_io_channel(bdev); 2718 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2719 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2720 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2721 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2722 nvme_qpair1 = io_path1->qpair; 2723 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2724 2725 set_thread(1); 2726 2727 ch2 = spdk_get_io_channel(bdev); 2728 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2729 2730 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2731 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2732 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2733 2734 /* Aborting the already completed request should fail. */ 2735 write_io->internal.in_submit_request = true; 2736 bdev_nvme_submit_request(ch1, write_io); 2737 poll_threads(); 2738 2739 CU_ASSERT(write_io->internal.in_submit_request == false); 2740 2741 abort_io->u.abort.bio_to_abort = write_io; 2742 abort_io->internal.in_submit_request = true; 2743 2744 bdev_nvme_submit_request(ch1, abort_io); 2745 2746 poll_threads(); 2747 2748 CU_ASSERT(abort_io->internal.in_submit_request == false); 2749 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2750 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2751 2752 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2753 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2754 2755 admin_io->internal.in_submit_request = true; 2756 bdev_nvme_submit_request(ch1, admin_io); 2757 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2758 poll_threads(); 2759 2760 CU_ASSERT(admin_io->internal.in_submit_request == false); 2761 2762 abort_io->u.abort.bio_to_abort = admin_io; 2763 abort_io->internal.in_submit_request = true; 2764 2765 bdev_nvme_submit_request(ch2, abort_io); 2766 2767 poll_threads(); 2768 2769 CU_ASSERT(abort_io->internal.in_submit_request == false); 2770 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2771 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2772 2773 /* Aborting the write request should succeed. */ 2774 write_io->internal.in_submit_request = true; 2775 bdev_nvme_submit_request(ch1, write_io); 2776 2777 CU_ASSERT(write_io->internal.in_submit_request == true); 2778 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2779 2780 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2781 abort_io->u.abort.bio_to_abort = write_io; 2782 abort_io->internal.in_submit_request = true; 2783 2784 bdev_nvme_submit_request(ch1, abort_io); 2785 2786 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2787 poll_threads(); 2788 2789 CU_ASSERT(abort_io->internal.in_submit_request == false); 2790 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2791 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2792 CU_ASSERT(write_io->internal.in_submit_request == false); 2793 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2794 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2795 2796 /* Aborting the fuse request should succeed. */ 2797 fuse_io->internal.in_submit_request = true; 2798 bdev_nvme_submit_request(ch1, fuse_io); 2799 2800 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2801 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2802 2803 abort_io->u.abort.bio_to_abort = fuse_io; 2804 abort_io->internal.in_submit_request = true; 2805 2806 bdev_nvme_submit_request(ch1, abort_io); 2807 2808 spdk_delay_us(10000); 2809 poll_threads(); 2810 2811 CU_ASSERT(abort_io->internal.in_submit_request == false); 2812 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2813 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2814 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2815 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2816 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2817 2818 /* Aborting the admin request should succeed. */ 2819 admin_io->internal.in_submit_request = true; 2820 bdev_nvme_submit_request(ch1, admin_io); 2821 2822 CU_ASSERT(admin_io->internal.in_submit_request == true); 2823 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2824 2825 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2826 abort_io->u.abort.bio_to_abort = admin_io; 2827 abort_io->internal.in_submit_request = true; 2828 2829 bdev_nvme_submit_request(ch2, abort_io); 2830 2831 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2832 poll_threads(); 2833 2834 CU_ASSERT(abort_io->internal.in_submit_request == false); 2835 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2836 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2837 CU_ASSERT(admin_io->internal.in_submit_request == false); 2838 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2839 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2840 2841 set_thread(0); 2842 2843 /* If qpair is disconnected, it is freed and then reconnected via resetting 2844 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2845 * while resetting the nvme_ctrlr. 2846 */ 2847 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2848 2849 poll_thread_times(0, 3); 2850 2851 CU_ASSERT(nvme_qpair1->qpair == NULL); 2852 CU_ASSERT(nvme_ctrlr->resetting == true); 2853 2854 write_io->internal.in_submit_request = true; 2855 2856 bdev_nvme_submit_request(ch1, write_io); 2857 2858 CU_ASSERT(write_io->internal.in_submit_request == true); 2859 CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list))); 2860 2861 /* Aborting the queued write request should succeed immediately. */ 2862 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2863 abort_io->u.abort.bio_to_abort = write_io; 2864 abort_io->internal.in_submit_request = true; 2865 2866 bdev_nvme_submit_request(ch1, abort_io); 2867 2868 CU_ASSERT(abort_io->internal.in_submit_request == false); 2869 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2870 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2871 CU_ASSERT(write_io->internal.in_submit_request == false); 2872 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2873 2874 poll_threads(); 2875 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2876 poll_threads(); 2877 2878 spdk_put_io_channel(ch1); 2879 2880 set_thread(1); 2881 2882 spdk_put_io_channel(ch2); 2883 2884 poll_threads(); 2885 2886 free(write_io); 2887 free(fuse_io); 2888 free(admin_io); 2889 free(abort_io); 2890 2891 set_thread(1); 2892 2893 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2894 CU_ASSERT(rc == 0); 2895 2896 poll_threads(); 2897 spdk_delay_us(1000); 2898 poll_threads(); 2899 2900 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2901 } 2902 2903 static void 2904 test_get_io_qpair(void) 2905 { 2906 struct spdk_nvme_transport_id trid = {}; 2907 struct spdk_nvme_ctrlr ctrlr = {}; 2908 struct nvme_ctrlr *nvme_ctrlr = NULL; 2909 struct spdk_io_channel *ch; 2910 struct nvme_ctrlr_channel *ctrlr_ch; 2911 struct spdk_nvme_qpair *qpair; 2912 int rc; 2913 2914 ut_init_trid(&trid); 2915 TAILQ_INIT(&ctrlr.active_io_qpairs); 2916 2917 set_thread(0); 2918 2919 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2920 CU_ASSERT(rc == 0); 2921 2922 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2923 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2924 2925 ch = spdk_get_io_channel(nvme_ctrlr); 2926 SPDK_CU_ASSERT_FATAL(ch != NULL); 2927 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2928 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2929 2930 qpair = bdev_nvme_get_io_qpair(ch); 2931 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2932 2933 spdk_put_io_channel(ch); 2934 2935 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2936 CU_ASSERT(rc == 0); 2937 2938 poll_threads(); 2939 spdk_delay_us(1000); 2940 poll_threads(); 2941 2942 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2943 } 2944 2945 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2946 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2947 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2948 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2949 */ 2950 static void 2951 test_bdev_unregister(void) 2952 { 2953 struct spdk_nvme_transport_id trid = {}; 2954 struct spdk_nvme_ctrlr *ctrlr; 2955 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2956 struct nvme_ctrlr *nvme_ctrlr; 2957 struct nvme_ns *nvme_ns1, *nvme_ns2; 2958 const int STRING_SIZE = 32; 2959 const char *attached_names[STRING_SIZE]; 2960 struct nvme_bdev *bdev1, *bdev2; 2961 int rc; 2962 2963 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2964 ut_init_trid(&trid); 2965 2966 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2967 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2968 2969 g_ut_attach_ctrlr_status = 0; 2970 g_ut_attach_bdev_count = 2; 2971 2972 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2973 attach_ctrlr_done, NULL, &opts, NULL, false); 2974 CU_ASSERT(rc == 0); 2975 2976 spdk_delay_us(1000); 2977 poll_threads(); 2978 2979 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2980 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2981 2982 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2983 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2984 2985 bdev1 = nvme_ns1->bdev; 2986 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2987 2988 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2989 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2990 2991 bdev2 = nvme_ns2->bdev; 2992 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2993 2994 bdev_nvme_destruct(&bdev1->disk); 2995 bdev_nvme_destruct(&bdev2->disk); 2996 2997 poll_threads(); 2998 2999 CU_ASSERT(nvme_ns1->bdev == NULL); 3000 CU_ASSERT(nvme_ns2->bdev == NULL); 3001 3002 nvme_ctrlr->destruct = true; 3003 _nvme_ctrlr_destruct(nvme_ctrlr); 3004 3005 poll_threads(); 3006 spdk_delay_us(1000); 3007 poll_threads(); 3008 3009 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3010 } 3011 3012 static void 3013 test_compare_ns(void) 3014 { 3015 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 3016 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 3017 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 3018 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3019 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3020 3021 /* No IDs are defined. */ 3022 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3023 3024 /* Only EUI64 are defined and not matched. */ 3025 nsdata1.eui64 = 0xABCDEF0123456789; 3026 nsdata2.eui64 = 0xBBCDEF0123456789; 3027 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3028 3029 /* Only EUI64 are defined and matched. */ 3030 nsdata2.eui64 = 0xABCDEF0123456789; 3031 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3032 3033 /* Only NGUID are defined and not matched. */ 3034 nsdata1.eui64 = 0x0; 3035 nsdata2.eui64 = 0x0; 3036 nsdata1.nguid[0] = 0x12; 3037 nsdata2.nguid[0] = 0x10; 3038 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3039 3040 /* Only NGUID are defined and matched. */ 3041 nsdata2.nguid[0] = 0x12; 3042 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3043 3044 /* Only UUID are defined and not matched. */ 3045 nsdata1.nguid[0] = 0x0; 3046 nsdata2.nguid[0] = 0x0; 3047 ns1.uuid = &uuid1; 3048 ns2.uuid = &uuid2; 3049 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3050 3051 /* Only one UUID is defined. */ 3052 ns1.uuid = NULL; 3053 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3054 3055 /* Only UUID are defined and matched. */ 3056 ns1.uuid = &uuid2; 3057 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3058 3059 /* All EUI64, NGUID, and UUID are defined and matched. */ 3060 nsdata1.eui64 = 0x123456789ABCDEF; 3061 nsdata2.eui64 = 0x123456789ABCDEF; 3062 nsdata1.nguid[15] = 0x34; 3063 nsdata2.nguid[15] = 0x34; 3064 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3065 3066 /* CSI are not matched. */ 3067 ns1.csi = SPDK_NVME_CSI_ZNS; 3068 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3069 } 3070 3071 static void 3072 test_init_ana_log_page(void) 3073 { 3074 struct spdk_nvme_transport_id trid = {}; 3075 struct spdk_nvme_ctrlr *ctrlr; 3076 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3077 struct nvme_ctrlr *nvme_ctrlr; 3078 const int STRING_SIZE = 32; 3079 const char *attached_names[STRING_SIZE]; 3080 int rc; 3081 3082 set_thread(0); 3083 3084 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3085 ut_init_trid(&trid); 3086 3087 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3088 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3089 3090 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3091 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3092 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3093 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3094 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3095 3096 g_ut_attach_ctrlr_status = 0; 3097 g_ut_attach_bdev_count = 5; 3098 3099 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3100 attach_ctrlr_done, NULL, &opts, NULL, false); 3101 CU_ASSERT(rc == 0); 3102 3103 spdk_delay_us(1000); 3104 poll_threads(); 3105 3106 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3107 poll_threads(); 3108 3109 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3110 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3111 3112 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3113 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3114 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3115 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3116 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3117 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3118 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3119 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3120 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3121 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3122 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3123 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3124 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3125 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3126 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3127 3128 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3129 CU_ASSERT(rc == 0); 3130 3131 poll_threads(); 3132 spdk_delay_us(1000); 3133 poll_threads(); 3134 3135 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3136 } 3137 3138 static void 3139 init_accel(void) 3140 { 3141 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3142 sizeof(int), "accel_p"); 3143 } 3144 3145 static void 3146 fini_accel(void) 3147 { 3148 spdk_io_device_unregister(g_accel_p, NULL); 3149 } 3150 3151 static void 3152 test_get_memory_domains(void) 3153 { 3154 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3155 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3156 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3157 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3158 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3159 struct spdk_memory_domain *domains[4] = {}; 3160 int rc = 0; 3161 3162 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3163 3164 /* nvme controller doesn't have memory domains */ 3165 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3166 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3167 CU_ASSERT(rc == 0); 3168 CU_ASSERT(domains[0] == NULL); 3169 CU_ASSERT(domains[1] == NULL); 3170 3171 /* nvme controller has a memory domain */ 3172 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3173 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3174 CU_ASSERT(rc == 1); 3175 CU_ASSERT(domains[0] != NULL); 3176 memset(domains, 0, sizeof(domains)); 3177 3178 /* multipath, 2 controllers report 1 memory domain each */ 3179 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3180 3181 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3182 CU_ASSERT(rc == 2); 3183 CU_ASSERT(domains[0] != NULL); 3184 CU_ASSERT(domains[1] != NULL); 3185 memset(domains, 0, sizeof(domains)); 3186 3187 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3188 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3189 CU_ASSERT(rc == 2); 3190 3191 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3192 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3193 CU_ASSERT(rc == 2); 3194 CU_ASSERT(domains[0] == NULL); 3195 CU_ASSERT(domains[1] == NULL); 3196 3197 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3198 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3199 CU_ASSERT(rc == 2); 3200 CU_ASSERT(domains[0] != NULL); 3201 CU_ASSERT(domains[1] == NULL); 3202 memset(domains, 0, sizeof(domains)); 3203 3204 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3205 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3206 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3207 CU_ASSERT(rc == 4); 3208 CU_ASSERT(domains[0] != NULL); 3209 CU_ASSERT(domains[1] != NULL); 3210 CU_ASSERT(domains[2] != NULL); 3211 CU_ASSERT(domains[3] != NULL); 3212 memset(domains, 0, sizeof(domains)); 3213 3214 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3215 * Array size is less than the number of memory domains */ 3216 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3217 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3218 CU_ASSERT(rc == 4); 3219 CU_ASSERT(domains[0] != NULL); 3220 CU_ASSERT(domains[1] != NULL); 3221 CU_ASSERT(domains[2] != NULL); 3222 CU_ASSERT(domains[3] == NULL); 3223 memset(domains, 0, sizeof(domains)); 3224 3225 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3226 } 3227 3228 static void 3229 test_reconnect_qpair(void) 3230 { 3231 struct spdk_nvme_transport_id trid = {}; 3232 struct spdk_nvme_ctrlr *ctrlr; 3233 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3234 struct nvme_ctrlr *nvme_ctrlr; 3235 const int STRING_SIZE = 32; 3236 const char *attached_names[STRING_SIZE]; 3237 struct nvme_bdev *bdev; 3238 struct spdk_io_channel *ch1, *ch2; 3239 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3240 struct nvme_io_path *io_path1, *io_path2; 3241 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3242 int rc; 3243 3244 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3245 ut_init_trid(&trid); 3246 3247 set_thread(0); 3248 3249 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3250 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3251 3252 g_ut_attach_ctrlr_status = 0; 3253 g_ut_attach_bdev_count = 1; 3254 3255 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3256 attach_ctrlr_done, NULL, &opts, NULL, false); 3257 CU_ASSERT(rc == 0); 3258 3259 spdk_delay_us(1000); 3260 poll_threads(); 3261 3262 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3263 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3264 3265 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3266 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3267 3268 ch1 = spdk_get_io_channel(bdev); 3269 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3270 3271 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3272 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3273 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3274 nvme_qpair1 = io_path1->qpair; 3275 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3276 3277 set_thread(1); 3278 3279 ch2 = spdk_get_io_channel(bdev); 3280 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3281 3282 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3283 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3284 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3285 nvme_qpair2 = io_path2->qpair; 3286 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3287 3288 /* If a qpair is disconnected, it is freed and then reconnected via 3289 * resetting the corresponding nvme_ctrlr. 3290 */ 3291 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3292 ctrlr->is_failed = true; 3293 3294 poll_thread_times(1, 3); 3295 CU_ASSERT(nvme_qpair1->qpair != NULL); 3296 CU_ASSERT(nvme_qpair2->qpair == NULL); 3297 CU_ASSERT(nvme_ctrlr->resetting == true); 3298 3299 poll_thread_times(0, 3); 3300 CU_ASSERT(nvme_qpair1->qpair == NULL); 3301 CU_ASSERT(nvme_qpair2->qpair == NULL); 3302 CU_ASSERT(ctrlr->is_failed == true); 3303 3304 poll_thread_times(1, 2); 3305 poll_thread_times(0, 1); 3306 CU_ASSERT(ctrlr->is_failed == false); 3307 CU_ASSERT(ctrlr->adminq.is_connected == false); 3308 3309 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3310 poll_thread_times(0, 2); 3311 CU_ASSERT(ctrlr->adminq.is_connected == true); 3312 3313 poll_thread_times(0, 1); 3314 poll_thread_times(1, 1); 3315 CU_ASSERT(nvme_qpair1->qpair != NULL); 3316 CU_ASSERT(nvme_qpair2->qpair != NULL); 3317 CU_ASSERT(nvme_ctrlr->resetting == true); 3318 3319 poll_thread_times(0, 2); 3320 poll_thread_times(1, 1); 3321 poll_thread_times(0, 1); 3322 CU_ASSERT(nvme_ctrlr->resetting == false); 3323 3324 poll_threads(); 3325 3326 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3327 * fails, the qpair is just freed. 3328 */ 3329 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3330 ctrlr->is_failed = true; 3331 ctrlr->fail_reset = true; 3332 3333 poll_thread_times(1, 3); 3334 CU_ASSERT(nvme_qpair1->qpair != NULL); 3335 CU_ASSERT(nvme_qpair2->qpair == NULL); 3336 CU_ASSERT(nvme_ctrlr->resetting == true); 3337 3338 poll_thread_times(0, 3); 3339 poll_thread_times(1, 1); 3340 CU_ASSERT(nvme_qpair1->qpair == NULL); 3341 CU_ASSERT(nvme_qpair2->qpair == NULL); 3342 CU_ASSERT(ctrlr->is_failed == true); 3343 3344 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3345 poll_thread_times(0, 3); 3346 poll_thread_times(1, 1); 3347 poll_thread_times(0, 1); 3348 CU_ASSERT(ctrlr->is_failed == true); 3349 CU_ASSERT(nvme_ctrlr->resetting == false); 3350 CU_ASSERT(nvme_qpair1->qpair == NULL); 3351 CU_ASSERT(nvme_qpair2->qpair == NULL); 3352 3353 poll_threads(); 3354 3355 spdk_put_io_channel(ch2); 3356 3357 set_thread(0); 3358 3359 spdk_put_io_channel(ch1); 3360 3361 poll_threads(); 3362 3363 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3364 CU_ASSERT(rc == 0); 3365 3366 poll_threads(); 3367 spdk_delay_us(1000); 3368 poll_threads(); 3369 3370 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3371 } 3372 3373 static void 3374 test_create_bdev_ctrlr(void) 3375 { 3376 struct nvme_path_id path1 = {}, path2 = {}; 3377 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3378 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3379 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3380 const int STRING_SIZE = 32; 3381 const char *attached_names[STRING_SIZE]; 3382 int rc; 3383 3384 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3385 ut_init_trid(&path1.trid); 3386 ut_init_trid2(&path2.trid); 3387 3388 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3389 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3390 3391 g_ut_attach_ctrlr_status = 0; 3392 g_ut_attach_bdev_count = 0; 3393 3394 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3395 attach_ctrlr_done, NULL, &opts, NULL, true); 3396 CU_ASSERT(rc == 0); 3397 3398 spdk_delay_us(1000); 3399 poll_threads(); 3400 3401 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3402 poll_threads(); 3403 3404 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3405 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3406 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3407 3408 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3409 g_ut_attach_ctrlr_status = -EINVAL; 3410 3411 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3412 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3413 3414 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3415 3416 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3417 attach_ctrlr_done, NULL, &opts, NULL, true); 3418 CU_ASSERT(rc == 0); 3419 3420 spdk_delay_us(1000); 3421 poll_threads(); 3422 3423 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3424 poll_threads(); 3425 3426 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3427 3428 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3429 g_ut_attach_ctrlr_status = 0; 3430 3431 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3432 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3433 3434 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3435 attach_ctrlr_done, NULL, &opts, NULL, true); 3436 CU_ASSERT(rc == 0); 3437 3438 spdk_delay_us(1000); 3439 poll_threads(); 3440 3441 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3442 poll_threads(); 3443 3444 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3445 3446 /* Delete two ctrlrs at once. */ 3447 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3448 CU_ASSERT(rc == 0); 3449 3450 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3451 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3452 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3453 3454 poll_threads(); 3455 spdk_delay_us(1000); 3456 poll_threads(); 3457 3458 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3459 3460 /* Add two ctrlrs and delete one by one. */ 3461 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3462 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3463 3464 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3465 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3466 3467 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3468 attach_ctrlr_done, NULL, &opts, NULL, true); 3469 CU_ASSERT(rc == 0); 3470 3471 spdk_delay_us(1000); 3472 poll_threads(); 3473 3474 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3475 poll_threads(); 3476 3477 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3478 attach_ctrlr_done, NULL, &opts, NULL, true); 3479 CU_ASSERT(rc == 0); 3480 3481 spdk_delay_us(1000); 3482 poll_threads(); 3483 3484 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3485 poll_threads(); 3486 3487 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3488 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3489 3490 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3491 CU_ASSERT(rc == 0); 3492 3493 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3494 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3495 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3496 3497 poll_threads(); 3498 spdk_delay_us(1000); 3499 poll_threads(); 3500 3501 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3502 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3503 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3504 3505 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3506 CU_ASSERT(rc == 0); 3507 3508 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3509 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3510 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3511 3512 poll_threads(); 3513 spdk_delay_us(1000); 3514 poll_threads(); 3515 3516 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3517 } 3518 3519 static struct nvme_ns * 3520 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3521 { 3522 struct nvme_ns *nvme_ns; 3523 3524 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3525 if (nvme_ns->ctrlr == nvme_ctrlr) { 3526 return nvme_ns; 3527 } 3528 } 3529 3530 return NULL; 3531 } 3532 3533 static void 3534 test_add_multi_ns_to_bdev(void) 3535 { 3536 struct nvme_path_id path1 = {}, path2 = {}; 3537 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3538 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3539 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3540 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3541 struct nvme_ns *nvme_ns1, *nvme_ns2; 3542 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3543 const int STRING_SIZE = 32; 3544 const char *attached_names[STRING_SIZE]; 3545 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3546 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3547 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3548 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3549 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3550 int rc; 3551 3552 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3553 ut_init_trid(&path1.trid); 3554 ut_init_trid2(&path2.trid); 3555 3556 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3557 3558 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3559 * namespaces are populated. 3560 */ 3561 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3562 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3563 3564 ctrlr1->ns[1].is_active = false; 3565 ctrlr1->ns[4].is_active = false; 3566 ctrlr1->ns[0].uuid = &uuid1; 3567 ctrlr1->ns[2].uuid = &uuid3; 3568 ctrlr1->ns[3].uuid = &uuid4; 3569 3570 g_ut_attach_ctrlr_status = 0; 3571 g_ut_attach_bdev_count = 3; 3572 3573 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3574 attach_ctrlr_done, NULL, &opts, NULL, true); 3575 CU_ASSERT(rc == 0); 3576 3577 spdk_delay_us(1000); 3578 poll_threads(); 3579 3580 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3581 poll_threads(); 3582 3583 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3584 * namespaces are populated. The uuid of 4th namespace is different, and hence 3585 * adding 4th namespace to a bdev should fail. 3586 */ 3587 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3588 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3589 3590 ctrlr2->ns[2].is_active = false; 3591 ctrlr2->ns[4].is_active = false; 3592 ctrlr2->ns[0].uuid = &uuid1; 3593 ctrlr2->ns[1].uuid = &uuid2; 3594 ctrlr2->ns[3].uuid = &uuid44; 3595 3596 g_ut_attach_ctrlr_status = 0; 3597 g_ut_attach_bdev_count = 2; 3598 3599 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3600 attach_ctrlr_done, NULL, &opts, NULL, true); 3601 CU_ASSERT(rc == 0); 3602 3603 spdk_delay_us(1000); 3604 poll_threads(); 3605 3606 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3607 poll_threads(); 3608 3609 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3610 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3611 3612 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3613 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3614 3615 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3616 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3617 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3618 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3619 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3620 3621 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3622 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3623 3624 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3625 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3626 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3627 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3628 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3629 3630 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3631 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3632 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3633 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3634 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3635 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3636 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3637 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3638 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3639 3640 CU_ASSERT(bdev1->ref == 2); 3641 CU_ASSERT(bdev2->ref == 1); 3642 CU_ASSERT(bdev3->ref == 1); 3643 CU_ASSERT(bdev4->ref == 1); 3644 3645 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3646 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3647 CU_ASSERT(rc == 0); 3648 3649 poll_threads(); 3650 spdk_delay_us(1000); 3651 poll_threads(); 3652 3653 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3654 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3655 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2); 3656 3657 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3658 CU_ASSERT(rc == 0); 3659 3660 poll_threads(); 3661 spdk_delay_us(1000); 3662 poll_threads(); 3663 3664 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3665 3666 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3667 * can be deleted when the bdev subsystem shutdown. 3668 */ 3669 g_ut_attach_bdev_count = 1; 3670 3671 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3672 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3673 3674 ctrlr1->ns[0].uuid = &uuid1; 3675 3676 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3677 attach_ctrlr_done, NULL, &opts, NULL, true); 3678 CU_ASSERT(rc == 0); 3679 3680 spdk_delay_us(1000); 3681 poll_threads(); 3682 3683 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3684 poll_threads(); 3685 3686 ut_init_trid2(&path2.trid); 3687 3688 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3689 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3690 3691 ctrlr2->ns[0].uuid = &uuid1; 3692 3693 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3694 attach_ctrlr_done, NULL, &opts, NULL, true); 3695 CU_ASSERT(rc == 0); 3696 3697 spdk_delay_us(1000); 3698 poll_threads(); 3699 3700 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3701 poll_threads(); 3702 3703 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3704 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3705 3706 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3707 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3708 3709 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3710 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3711 3712 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3713 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3714 3715 /* Check if a nvme_bdev has two nvme_ns. */ 3716 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3717 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3718 CU_ASSERT(nvme_ns1->bdev == bdev1); 3719 3720 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3721 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3722 CU_ASSERT(nvme_ns2->bdev == bdev1); 3723 3724 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3725 bdev_nvme_destruct(&bdev1->disk); 3726 3727 poll_threads(); 3728 3729 CU_ASSERT(nvme_ns1->bdev == NULL); 3730 CU_ASSERT(nvme_ns2->bdev == NULL); 3731 3732 nvme_ctrlr1->destruct = true; 3733 _nvme_ctrlr_destruct(nvme_ctrlr1); 3734 3735 poll_threads(); 3736 spdk_delay_us(1000); 3737 poll_threads(); 3738 3739 nvme_ctrlr2->destruct = true; 3740 _nvme_ctrlr_destruct(nvme_ctrlr2); 3741 3742 poll_threads(); 3743 spdk_delay_us(1000); 3744 poll_threads(); 3745 3746 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3747 } 3748 3749 static void 3750 test_add_multi_io_paths_to_nbdev_ch(void) 3751 { 3752 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3753 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3754 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3755 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3756 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3757 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3758 const int STRING_SIZE = 32; 3759 const char *attached_names[STRING_SIZE]; 3760 struct nvme_bdev *bdev; 3761 struct spdk_io_channel *ch; 3762 struct nvme_bdev_channel *nbdev_ch; 3763 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3764 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3765 int rc; 3766 3767 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3768 ut_init_trid(&path1.trid); 3769 ut_init_trid2(&path2.trid); 3770 ut_init_trid3(&path3.trid); 3771 g_ut_attach_ctrlr_status = 0; 3772 g_ut_attach_bdev_count = 1; 3773 3774 set_thread(1); 3775 3776 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3777 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3778 3779 ctrlr1->ns[0].uuid = &uuid1; 3780 3781 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3782 attach_ctrlr_done, NULL, &opts, NULL, true); 3783 CU_ASSERT(rc == 0); 3784 3785 spdk_delay_us(1000); 3786 poll_threads(); 3787 3788 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3789 poll_threads(); 3790 3791 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3792 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3793 3794 ctrlr2->ns[0].uuid = &uuid1; 3795 3796 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3797 attach_ctrlr_done, NULL, &opts, NULL, true); 3798 CU_ASSERT(rc == 0); 3799 3800 spdk_delay_us(1000); 3801 poll_threads(); 3802 3803 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3804 poll_threads(); 3805 3806 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3807 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3808 3809 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3810 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3811 3812 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3813 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3814 3815 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3816 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3817 3818 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3819 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3820 3821 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3822 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3823 3824 set_thread(0); 3825 3826 ch = spdk_get_io_channel(bdev); 3827 SPDK_CU_ASSERT_FATAL(ch != NULL); 3828 nbdev_ch = spdk_io_channel_get_ctx(ch); 3829 3830 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3831 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3832 3833 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3834 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3835 3836 set_thread(1); 3837 3838 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3839 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3840 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3841 3842 ctrlr3->ns[0].uuid = &uuid1; 3843 3844 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3845 attach_ctrlr_done, NULL, &opts, NULL, true); 3846 CU_ASSERT(rc == 0); 3847 3848 spdk_delay_us(1000); 3849 poll_threads(); 3850 3851 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3852 poll_threads(); 3853 3854 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn); 3855 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3856 3857 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3858 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3859 3860 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3861 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3862 3863 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3864 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3865 CU_ASSERT(rc == 0); 3866 3867 poll_threads(); 3868 spdk_delay_us(1000); 3869 poll_threads(); 3870 3871 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1); 3872 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3873 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3); 3874 3875 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3876 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3877 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3878 3879 set_thread(0); 3880 3881 spdk_put_io_channel(ch); 3882 3883 poll_threads(); 3884 3885 set_thread(1); 3886 3887 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3888 CU_ASSERT(rc == 0); 3889 3890 poll_threads(); 3891 spdk_delay_us(1000); 3892 poll_threads(); 3893 3894 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3895 } 3896 3897 static void 3898 test_admin_path(void) 3899 { 3900 struct nvme_path_id path1 = {}, path2 = {}; 3901 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3902 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3903 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3904 const int STRING_SIZE = 32; 3905 const char *attached_names[STRING_SIZE]; 3906 struct nvme_bdev *bdev; 3907 struct spdk_io_channel *ch; 3908 struct spdk_bdev_io *bdev_io; 3909 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3910 int rc; 3911 3912 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3913 ut_init_trid(&path1.trid); 3914 ut_init_trid2(&path2.trid); 3915 g_ut_attach_ctrlr_status = 0; 3916 g_ut_attach_bdev_count = 1; 3917 3918 set_thread(0); 3919 3920 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3921 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3922 3923 ctrlr1->ns[0].uuid = &uuid1; 3924 3925 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3926 attach_ctrlr_done, NULL, &opts, NULL, true); 3927 CU_ASSERT(rc == 0); 3928 3929 spdk_delay_us(1000); 3930 poll_threads(); 3931 3932 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3933 poll_threads(); 3934 3935 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3936 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3937 3938 ctrlr2->ns[0].uuid = &uuid1; 3939 3940 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3941 attach_ctrlr_done, NULL, &opts, NULL, true); 3942 CU_ASSERT(rc == 0); 3943 3944 spdk_delay_us(1000); 3945 poll_threads(); 3946 3947 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3948 poll_threads(); 3949 3950 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3951 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3952 3953 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3954 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3955 3956 ch = spdk_get_io_channel(bdev); 3957 SPDK_CU_ASSERT_FATAL(ch != NULL); 3958 3959 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3960 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3961 3962 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3963 * submitted to ctrlr2. 3964 */ 3965 ctrlr1->is_failed = true; 3966 bdev_io->internal.in_submit_request = true; 3967 3968 bdev_nvme_submit_request(ch, bdev_io); 3969 3970 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3971 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3972 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3973 3974 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3975 poll_threads(); 3976 3977 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3978 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3979 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3980 3981 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3982 ctrlr2->is_failed = true; 3983 bdev_io->internal.in_submit_request = true; 3984 3985 bdev_nvme_submit_request(ch, bdev_io); 3986 3987 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3988 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3989 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3990 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3991 3992 free(bdev_io); 3993 3994 spdk_put_io_channel(ch); 3995 3996 poll_threads(); 3997 3998 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3999 CU_ASSERT(rc == 0); 4000 4001 poll_threads(); 4002 spdk_delay_us(1000); 4003 poll_threads(); 4004 4005 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4006 } 4007 4008 static struct nvme_io_path * 4009 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 4010 struct nvme_ctrlr *nvme_ctrlr) 4011 { 4012 struct nvme_io_path *io_path; 4013 4014 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 4015 if (io_path->qpair->ctrlr == nvme_ctrlr) { 4016 return io_path; 4017 } 4018 } 4019 4020 return NULL; 4021 } 4022 4023 static void 4024 test_reset_bdev_ctrlr(void) 4025 { 4026 struct nvme_path_id path1 = {}, path2 = {}; 4027 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4028 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4029 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4030 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4031 struct nvme_path_id *curr_path1, *curr_path2; 4032 const int STRING_SIZE = 32; 4033 const char *attached_names[STRING_SIZE]; 4034 struct nvme_bdev *bdev; 4035 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 4036 struct nvme_bdev_io *first_bio; 4037 struct spdk_io_channel *ch1, *ch2; 4038 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 4039 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 4040 int rc; 4041 4042 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4043 ut_init_trid(&path1.trid); 4044 ut_init_trid2(&path2.trid); 4045 g_ut_attach_ctrlr_status = 0; 4046 g_ut_attach_bdev_count = 1; 4047 4048 set_thread(0); 4049 4050 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4051 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4052 4053 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4054 attach_ctrlr_done, NULL, &opts, NULL, true); 4055 CU_ASSERT(rc == 0); 4056 4057 spdk_delay_us(1000); 4058 poll_threads(); 4059 4060 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4061 poll_threads(); 4062 4063 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4064 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4065 4066 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4067 attach_ctrlr_done, NULL, &opts, NULL, true); 4068 CU_ASSERT(rc == 0); 4069 4070 spdk_delay_us(1000); 4071 poll_threads(); 4072 4073 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4074 poll_threads(); 4075 4076 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4077 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4078 4079 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4080 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4081 4082 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4083 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4084 4085 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4086 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4087 4088 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4089 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4090 4091 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4092 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4093 4094 set_thread(0); 4095 4096 ch1 = spdk_get_io_channel(bdev); 4097 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4098 4099 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4100 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4101 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4102 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4103 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4104 4105 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4106 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4107 4108 set_thread(1); 4109 4110 ch2 = spdk_get_io_channel(bdev); 4111 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4112 4113 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4114 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4115 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4116 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4117 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4118 4119 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4120 4121 /* The first reset request from bdev_io is submitted on thread 0. 4122 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4123 * 4124 * A few extra polls are necessary after resetting ctrlr1 to check 4125 * pending reset requests for ctrlr1. 4126 */ 4127 ctrlr1->is_failed = true; 4128 curr_path1->last_failed_tsc = spdk_get_ticks(); 4129 ctrlr2->is_failed = true; 4130 curr_path2->last_failed_tsc = spdk_get_ticks(); 4131 4132 set_thread(0); 4133 4134 bdev_nvme_submit_request(ch1, first_bdev_io); 4135 CU_ASSERT(first_bio->io_path == io_path11); 4136 CU_ASSERT(nvme_ctrlr1->resetting == true); 4137 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4138 4139 poll_thread_times(0, 3); 4140 CU_ASSERT(io_path11->qpair->qpair == NULL); 4141 CU_ASSERT(io_path21->qpair->qpair != NULL); 4142 4143 poll_thread_times(1, 2); 4144 CU_ASSERT(io_path11->qpair->qpair == NULL); 4145 CU_ASSERT(io_path21->qpair->qpair == NULL); 4146 CU_ASSERT(ctrlr1->is_failed == true); 4147 4148 poll_thread_times(0, 1); 4149 CU_ASSERT(nvme_ctrlr1->resetting == true); 4150 CU_ASSERT(ctrlr1->is_failed == false); 4151 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4152 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4153 4154 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4155 poll_thread_times(0, 2); 4156 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4157 4158 poll_thread_times(0, 1); 4159 CU_ASSERT(io_path11->qpair->qpair != NULL); 4160 CU_ASSERT(io_path21->qpair->qpair == NULL); 4161 4162 poll_thread_times(1, 1); 4163 CU_ASSERT(io_path11->qpair->qpair != NULL); 4164 CU_ASSERT(io_path21->qpair->qpair != NULL); 4165 4166 poll_thread_times(0, 2); 4167 CU_ASSERT(nvme_ctrlr1->resetting == true); 4168 poll_thread_times(1, 1); 4169 CU_ASSERT(nvme_ctrlr1->resetting == true); 4170 poll_thread_times(0, 2); 4171 CU_ASSERT(nvme_ctrlr1->resetting == false); 4172 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4173 CU_ASSERT(first_bio->io_path == io_path12); 4174 CU_ASSERT(nvme_ctrlr2->resetting == true); 4175 4176 poll_thread_times(0, 3); 4177 CU_ASSERT(io_path12->qpair->qpair == NULL); 4178 CU_ASSERT(io_path22->qpair->qpair != NULL); 4179 4180 poll_thread_times(1, 2); 4181 CU_ASSERT(io_path12->qpair->qpair == NULL); 4182 CU_ASSERT(io_path22->qpair->qpair == NULL); 4183 CU_ASSERT(ctrlr2->is_failed == true); 4184 4185 poll_thread_times(0, 1); 4186 CU_ASSERT(nvme_ctrlr2->resetting == true); 4187 CU_ASSERT(ctrlr2->is_failed == false); 4188 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4189 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4190 4191 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4192 poll_thread_times(0, 2); 4193 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4194 4195 poll_thread_times(0, 1); 4196 CU_ASSERT(io_path12->qpair->qpair != NULL); 4197 CU_ASSERT(io_path22->qpair->qpair == NULL); 4198 4199 poll_thread_times(1, 2); 4200 CU_ASSERT(io_path12->qpair->qpair != NULL); 4201 CU_ASSERT(io_path22->qpair->qpair != NULL); 4202 4203 poll_thread_times(0, 2); 4204 CU_ASSERT(nvme_ctrlr2->resetting == true); 4205 poll_thread_times(1, 1); 4206 CU_ASSERT(nvme_ctrlr2->resetting == true); 4207 poll_thread_times(0, 2); 4208 CU_ASSERT(first_bio->io_path == NULL); 4209 CU_ASSERT(nvme_ctrlr2->resetting == false); 4210 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4211 4212 poll_threads(); 4213 4214 /* There is a race between two reset requests from bdev_io. 4215 * 4216 * The first reset request is submitted on thread 0, and the second reset 4217 * request is submitted on thread 1 while the first is resetting ctrlr1. 4218 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4219 * both reset requests go to ctrlr2. The first comes earlier than the second. 4220 * The second is pending on ctrlr2 again. After the first completes resetting 4221 * ctrl2, both complete successfully. 4222 */ 4223 ctrlr1->is_failed = true; 4224 curr_path1->last_failed_tsc = spdk_get_ticks(); 4225 ctrlr2->is_failed = true; 4226 curr_path2->last_failed_tsc = spdk_get_ticks(); 4227 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4228 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4229 4230 set_thread(0); 4231 4232 bdev_nvme_submit_request(ch1, first_bdev_io); 4233 4234 set_thread(1); 4235 4236 bdev_nvme_submit_request(ch2, second_bdev_io); 4237 4238 CU_ASSERT(nvme_ctrlr1->resetting == true); 4239 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4240 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == 4241 (struct nvme_bdev_io *)second_bdev_io->driver_ctx); 4242 4243 poll_threads(); 4244 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4245 poll_threads(); 4246 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4247 poll_threads(); 4248 4249 CU_ASSERT(ctrlr1->is_failed == false); 4250 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4251 CU_ASSERT(ctrlr2->is_failed == false); 4252 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4253 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4254 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4255 4256 set_thread(0); 4257 4258 spdk_put_io_channel(ch1); 4259 4260 set_thread(1); 4261 4262 spdk_put_io_channel(ch2); 4263 4264 poll_threads(); 4265 4266 set_thread(0); 4267 4268 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4269 CU_ASSERT(rc == 0); 4270 4271 poll_threads(); 4272 spdk_delay_us(1000); 4273 poll_threads(); 4274 4275 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4276 4277 free(first_bdev_io); 4278 free(second_bdev_io); 4279 } 4280 4281 static void 4282 test_find_io_path(void) 4283 { 4284 struct nvme_bdev_channel nbdev_ch = { 4285 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4286 }; 4287 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4288 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4289 struct spdk_nvme_ns ns1 = {}, ns2 = {}; 4290 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4291 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4292 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4293 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4294 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }; 4295 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4296 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4297 4298 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4299 4300 /* Test if io_path whose ANA state is not accessible is excluded. */ 4301 4302 nvme_qpair1.qpair = &qpair1; 4303 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4304 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4305 4306 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4307 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4308 4309 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4310 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4311 4312 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4313 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4314 4315 nbdev_ch.current_io_path = NULL; 4316 4317 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4318 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4319 4320 nbdev_ch.current_io_path = NULL; 4321 4322 /* Test if io_path whose qpair is resetting is excluded. */ 4323 4324 nvme_qpair1.qpair = NULL; 4325 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4326 4327 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4328 4329 /* Test if ANA optimized state or the first found ANA non-optimized state 4330 * is prioritized. 4331 */ 4332 4333 nvme_qpair1.qpair = &qpair1; 4334 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4335 nvme_qpair2.qpair = &qpair2; 4336 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4337 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4338 4339 nbdev_ch.current_io_path = NULL; 4340 4341 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4342 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4343 4344 nbdev_ch.current_io_path = NULL; 4345 } 4346 4347 static void 4348 test_retry_io_if_ana_state_is_updating(void) 4349 { 4350 struct nvme_path_id path = {}; 4351 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 4352 struct spdk_nvme_ctrlr *ctrlr; 4353 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 4354 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4355 struct nvme_ctrlr *nvme_ctrlr; 4356 const int STRING_SIZE = 32; 4357 const char *attached_names[STRING_SIZE]; 4358 struct nvme_bdev *bdev; 4359 struct nvme_ns *nvme_ns; 4360 struct spdk_bdev_io *bdev_io1; 4361 struct spdk_io_channel *ch; 4362 struct nvme_bdev_channel *nbdev_ch; 4363 struct nvme_io_path *io_path; 4364 struct nvme_qpair *nvme_qpair; 4365 int rc; 4366 4367 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4368 ut_init_trid(&path.trid); 4369 4370 set_thread(0); 4371 4372 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4373 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4374 4375 g_ut_attach_ctrlr_status = 0; 4376 g_ut_attach_bdev_count = 1; 4377 4378 opts.ctrlr_loss_timeout_sec = -1; 4379 opts.reconnect_delay_sec = 1; 4380 4381 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4382 attach_ctrlr_done, NULL, &dopts, &opts, false); 4383 CU_ASSERT(rc == 0); 4384 4385 spdk_delay_us(1000); 4386 poll_threads(); 4387 4388 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4389 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4390 4391 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 4392 CU_ASSERT(nvme_ctrlr != NULL); 4393 4394 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4395 CU_ASSERT(bdev != NULL); 4396 4397 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4398 CU_ASSERT(nvme_ns != NULL); 4399 4400 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4401 ut_bdev_io_set_buf(bdev_io1); 4402 4403 ch = spdk_get_io_channel(bdev); 4404 SPDK_CU_ASSERT_FATAL(ch != NULL); 4405 4406 nbdev_ch = spdk_io_channel_get_ctx(ch); 4407 4408 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4409 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4410 4411 nvme_qpair = io_path->qpair; 4412 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4413 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4414 4415 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4416 4417 /* If qpair is connected, I/O should succeed. */ 4418 bdev_io1->internal.in_submit_request = true; 4419 4420 bdev_nvme_submit_request(ch, bdev_io1); 4421 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4422 4423 poll_threads(); 4424 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4425 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4426 4427 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4428 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4429 nbdev_ch->current_io_path = NULL; 4430 4431 bdev_io1->internal.in_submit_request = true; 4432 4433 bdev_nvme_submit_request(ch, bdev_io1); 4434 4435 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4436 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4437 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4438 4439 /* ANA state became accessible while I/O was queued. */ 4440 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4441 4442 spdk_delay_us(1000000); 4443 4444 poll_thread_times(0, 1); 4445 4446 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4447 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4448 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4449 4450 poll_threads(); 4451 4452 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4453 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4454 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4455 4456 free(bdev_io1); 4457 4458 spdk_put_io_channel(ch); 4459 4460 poll_threads(); 4461 4462 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4463 CU_ASSERT(rc == 0); 4464 4465 poll_threads(); 4466 spdk_delay_us(1000); 4467 poll_threads(); 4468 4469 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4470 } 4471 4472 static void 4473 test_retry_io_for_io_path_error(void) 4474 { 4475 struct nvme_path_id path1 = {}, path2 = {}; 4476 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4477 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4478 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4479 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4480 const int STRING_SIZE = 32; 4481 const char *attached_names[STRING_SIZE]; 4482 struct nvme_bdev *bdev; 4483 struct nvme_ns *nvme_ns1, *nvme_ns2; 4484 struct spdk_bdev_io *bdev_io; 4485 struct nvme_bdev_io *bio; 4486 struct spdk_io_channel *ch; 4487 struct nvme_bdev_channel *nbdev_ch; 4488 struct nvme_io_path *io_path1, *io_path2; 4489 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4490 struct ut_nvme_req *req; 4491 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4492 int rc; 4493 4494 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4495 ut_init_trid(&path1.trid); 4496 ut_init_trid2(&path2.trid); 4497 4498 g_opts.bdev_retry_count = 1; 4499 4500 set_thread(0); 4501 4502 g_ut_attach_ctrlr_status = 0; 4503 g_ut_attach_bdev_count = 1; 4504 4505 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4506 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4507 4508 ctrlr1->ns[0].uuid = &uuid1; 4509 4510 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4511 attach_ctrlr_done, NULL, &opts, NULL, true); 4512 CU_ASSERT(rc == 0); 4513 4514 spdk_delay_us(1000); 4515 poll_threads(); 4516 4517 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4518 poll_threads(); 4519 4520 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4521 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4522 4523 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4524 CU_ASSERT(nvme_ctrlr1 != NULL); 4525 4526 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4527 CU_ASSERT(bdev != NULL); 4528 4529 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4530 CU_ASSERT(nvme_ns1 != NULL); 4531 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4532 4533 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4534 ut_bdev_io_set_buf(bdev_io); 4535 4536 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4537 4538 ch = spdk_get_io_channel(bdev); 4539 SPDK_CU_ASSERT_FATAL(ch != NULL); 4540 4541 nbdev_ch = spdk_io_channel_get_ctx(ch); 4542 4543 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4544 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4545 4546 nvme_qpair1 = io_path1->qpair; 4547 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4548 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4549 4550 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4551 4552 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4553 bdev_io->internal.in_submit_request = true; 4554 4555 bdev_nvme_submit_request(ch, bdev_io); 4556 4557 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4558 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4559 4560 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4561 SPDK_CU_ASSERT_FATAL(req != NULL); 4562 4563 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4564 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4565 req->cpl.status.dnr = 1; 4566 4567 poll_thread_times(0, 1); 4568 4569 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4570 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4571 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4572 4573 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4574 bdev_io->internal.in_submit_request = true; 4575 4576 bdev_nvme_submit_request(ch, bdev_io); 4577 4578 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4579 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4580 4581 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4582 SPDK_CU_ASSERT_FATAL(req != NULL); 4583 4584 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4585 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4586 4587 poll_thread_times(0, 1); 4588 4589 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4590 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4591 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4592 4593 poll_threads(); 4594 4595 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4596 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4597 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4598 4599 /* Add io_path2 dynamically, and create a multipath configuration. */ 4600 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4601 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4602 4603 ctrlr2->ns[0].uuid = &uuid1; 4604 4605 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4606 attach_ctrlr_done, NULL, &opts, NULL, true); 4607 CU_ASSERT(rc == 0); 4608 4609 spdk_delay_us(1000); 4610 poll_threads(); 4611 4612 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4613 poll_threads(); 4614 4615 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4616 CU_ASSERT(nvme_ctrlr2 != NULL); 4617 4618 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4619 CU_ASSERT(nvme_ns2 != NULL); 4620 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4621 4622 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4623 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4624 4625 nvme_qpair2 = io_path2->qpair; 4626 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4627 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4628 4629 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4630 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4631 * So after a retry, I/O is submitted to io_path2 and should succeed. 4632 */ 4633 bdev_io->internal.in_submit_request = true; 4634 4635 bdev_nvme_submit_request(ch, bdev_io); 4636 4637 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4638 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4639 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4640 4641 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4642 SPDK_CU_ASSERT_FATAL(req != NULL); 4643 4644 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4645 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4646 4647 poll_thread_times(0, 1); 4648 4649 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4650 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4651 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4652 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4653 4654 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4655 nvme_qpair1->qpair = NULL; 4656 4657 poll_threads(); 4658 4659 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4660 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4661 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4662 4663 free(bdev_io); 4664 4665 spdk_put_io_channel(ch); 4666 4667 poll_threads(); 4668 4669 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4670 CU_ASSERT(rc == 0); 4671 4672 poll_threads(); 4673 spdk_delay_us(1000); 4674 poll_threads(); 4675 4676 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4677 4678 g_opts.bdev_retry_count = 0; 4679 } 4680 4681 static void 4682 test_retry_io_count(void) 4683 { 4684 struct nvme_path_id path = {}; 4685 struct spdk_nvme_ctrlr *ctrlr; 4686 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4687 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4688 struct nvme_ctrlr *nvme_ctrlr; 4689 const int STRING_SIZE = 32; 4690 const char *attached_names[STRING_SIZE]; 4691 struct nvme_bdev *bdev; 4692 struct nvme_ns *nvme_ns; 4693 struct spdk_bdev_io *bdev_io; 4694 struct nvme_bdev_io *bio; 4695 struct spdk_io_channel *ch; 4696 struct nvme_bdev_channel *nbdev_ch; 4697 struct nvme_io_path *io_path; 4698 struct nvme_qpair *nvme_qpair; 4699 struct ut_nvme_req *req; 4700 int rc; 4701 4702 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4703 ut_init_trid(&path.trid); 4704 4705 set_thread(0); 4706 4707 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4708 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4709 4710 g_ut_attach_ctrlr_status = 0; 4711 g_ut_attach_bdev_count = 1; 4712 4713 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4714 attach_ctrlr_done, NULL, &opts, NULL, false); 4715 CU_ASSERT(rc == 0); 4716 4717 spdk_delay_us(1000); 4718 poll_threads(); 4719 4720 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4721 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4722 4723 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 4724 CU_ASSERT(nvme_ctrlr != NULL); 4725 4726 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4727 CU_ASSERT(bdev != NULL); 4728 4729 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4730 CU_ASSERT(nvme_ns != NULL); 4731 4732 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4733 ut_bdev_io_set_buf(bdev_io); 4734 4735 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4736 4737 ch = spdk_get_io_channel(bdev); 4738 SPDK_CU_ASSERT_FATAL(ch != NULL); 4739 4740 nbdev_ch = spdk_io_channel_get_ctx(ch); 4741 4742 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4743 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4744 4745 nvme_qpair = io_path->qpair; 4746 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4747 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4748 4749 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4750 4751 /* If I/O is aborted by request, it should not be retried. */ 4752 g_opts.bdev_retry_count = 1; 4753 4754 bdev_io->internal.in_submit_request = true; 4755 4756 bdev_nvme_submit_request(ch, bdev_io); 4757 4758 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4759 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4760 4761 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4762 SPDK_CU_ASSERT_FATAL(req != NULL); 4763 4764 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4765 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4766 4767 poll_thread_times(0, 1); 4768 4769 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4770 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4771 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4772 4773 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4774 * the failed I/O should not be retried. 4775 */ 4776 g_opts.bdev_retry_count = 4; 4777 4778 bdev_io->internal.in_submit_request = true; 4779 4780 bdev_nvme_submit_request(ch, bdev_io); 4781 4782 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4783 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4784 4785 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4786 SPDK_CU_ASSERT_FATAL(req != NULL); 4787 4788 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4789 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4790 bio->retry_count = 4; 4791 4792 poll_thread_times(0, 1); 4793 4794 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4795 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4796 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4797 4798 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4799 g_opts.bdev_retry_count = -1; 4800 4801 bdev_io->internal.in_submit_request = true; 4802 4803 bdev_nvme_submit_request(ch, bdev_io); 4804 4805 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4806 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4807 4808 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4809 SPDK_CU_ASSERT_FATAL(req != NULL); 4810 4811 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4812 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4813 bio->retry_count = 4; 4814 4815 poll_thread_times(0, 1); 4816 4817 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4818 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4819 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4820 4821 poll_threads(); 4822 4823 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4824 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4825 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4826 4827 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4828 * the failed I/O should be retried. 4829 */ 4830 g_opts.bdev_retry_count = 4; 4831 4832 bdev_io->internal.in_submit_request = true; 4833 4834 bdev_nvme_submit_request(ch, bdev_io); 4835 4836 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4837 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4838 4839 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4840 SPDK_CU_ASSERT_FATAL(req != NULL); 4841 4842 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4843 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4844 bio->retry_count = 3; 4845 4846 poll_thread_times(0, 1); 4847 4848 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4849 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4850 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4851 4852 poll_threads(); 4853 4854 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4855 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4856 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4857 4858 free(bdev_io); 4859 4860 spdk_put_io_channel(ch); 4861 4862 poll_threads(); 4863 4864 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4865 CU_ASSERT(rc == 0); 4866 4867 poll_threads(); 4868 spdk_delay_us(1000); 4869 poll_threads(); 4870 4871 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4872 4873 g_opts.bdev_retry_count = 0; 4874 } 4875 4876 static void 4877 test_concurrent_read_ana_log_page(void) 4878 { 4879 struct spdk_nvme_transport_id trid = {}; 4880 struct spdk_nvme_ctrlr *ctrlr; 4881 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4882 struct nvme_ctrlr *nvme_ctrlr; 4883 const int STRING_SIZE = 32; 4884 const char *attached_names[STRING_SIZE]; 4885 int rc; 4886 4887 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4888 ut_init_trid(&trid); 4889 4890 set_thread(0); 4891 4892 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4893 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4894 4895 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4896 4897 g_ut_attach_ctrlr_status = 0; 4898 g_ut_attach_bdev_count = 1; 4899 4900 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4901 attach_ctrlr_done, NULL, &opts, NULL, false); 4902 CU_ASSERT(rc == 0); 4903 4904 spdk_delay_us(1000); 4905 poll_threads(); 4906 4907 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4908 poll_threads(); 4909 4910 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4911 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4912 4913 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4914 4915 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4916 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4917 4918 /* Following read request should be rejected. */ 4919 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4920 4921 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4922 4923 set_thread(1); 4924 4925 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4926 4927 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4928 4929 /* Reset request while reading ANA log page should not be rejected. */ 4930 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4931 CU_ASSERT(rc == 0); 4932 4933 poll_threads(); 4934 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4935 poll_threads(); 4936 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4937 poll_threads(); 4938 4939 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4940 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4941 4942 /* Read ANA log page while resetting ctrlr should be rejected. */ 4943 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4944 CU_ASSERT(rc == 0); 4945 4946 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4947 4948 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4949 4950 poll_threads(); 4951 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4952 poll_threads(); 4953 4954 set_thread(0); 4955 4956 /* It is possible that target sent ANA change for inactive namespaces. 4957 * 4958 * Previously, assert() was added because this case was unlikely. 4959 * However, assert() was hit in real environment. 4960 4961 * Hence, remove assert() and add unit test case. 4962 * 4963 * Simulate this case by depopulating namespaces and then parsing ANA 4964 * log page created when all namespaces are active. 4965 * Then, check if parsing ANA log page completes successfully. 4966 */ 4967 nvme_ctrlr_depopulate_namespaces(nvme_ctrlr); 4968 4969 rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr); 4970 CU_ASSERT(rc == 0); 4971 4972 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4973 CU_ASSERT(rc == 0); 4974 4975 poll_threads(); 4976 spdk_delay_us(1000); 4977 poll_threads(); 4978 4979 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4980 } 4981 4982 static void 4983 test_retry_io_for_ana_error(void) 4984 { 4985 struct nvme_path_id path = {}; 4986 struct spdk_nvme_ctrlr *ctrlr; 4987 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4988 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4989 struct nvme_ctrlr *nvme_ctrlr; 4990 const int STRING_SIZE = 32; 4991 const char *attached_names[STRING_SIZE]; 4992 struct nvme_bdev *bdev; 4993 struct nvme_ns *nvme_ns; 4994 struct spdk_bdev_io *bdev_io; 4995 struct nvme_bdev_io *bio; 4996 struct spdk_io_channel *ch; 4997 struct nvme_bdev_channel *nbdev_ch; 4998 struct nvme_io_path *io_path; 4999 struct nvme_qpair *nvme_qpair; 5000 struct ut_nvme_req *req; 5001 uint64_t now; 5002 int rc; 5003 5004 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5005 ut_init_trid(&path.trid); 5006 5007 g_opts.bdev_retry_count = 1; 5008 5009 set_thread(0); 5010 5011 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 5012 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5013 5014 g_ut_attach_ctrlr_status = 0; 5015 g_ut_attach_bdev_count = 1; 5016 5017 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5018 attach_ctrlr_done, NULL, &opts, NULL, false); 5019 CU_ASSERT(rc == 0); 5020 5021 spdk_delay_us(1000); 5022 poll_threads(); 5023 5024 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5025 poll_threads(); 5026 5027 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5028 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5029 5030 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 5031 CU_ASSERT(nvme_ctrlr != NULL); 5032 5033 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5034 CU_ASSERT(bdev != NULL); 5035 5036 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5037 CU_ASSERT(nvme_ns != NULL); 5038 5039 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5040 ut_bdev_io_set_buf(bdev_io); 5041 5042 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 5043 5044 ch = spdk_get_io_channel(bdev); 5045 SPDK_CU_ASSERT_FATAL(ch != NULL); 5046 5047 nbdev_ch = spdk_io_channel_get_ctx(ch); 5048 5049 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5050 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5051 5052 nvme_qpair = io_path->qpair; 5053 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5054 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5055 5056 now = spdk_get_ticks(); 5057 5058 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 5059 5060 /* If I/O got ANA error, it should be queued, the corresponding namespace 5061 * should be freezed and its ANA state should be updated. 5062 */ 5063 bdev_io->internal.in_submit_request = true; 5064 5065 bdev_nvme_submit_request(ch, bdev_io); 5066 5067 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5068 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5069 5070 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 5071 SPDK_CU_ASSERT_FATAL(req != NULL); 5072 5073 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5074 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 5075 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5076 5077 poll_thread_times(0, 1); 5078 5079 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5080 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5081 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5082 /* I/O should be retried immediately. */ 5083 CU_ASSERT(bio->retry_ticks == now); 5084 CU_ASSERT(nvme_ns->ana_state_updating == true); 5085 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5086 5087 poll_threads(); 5088 5089 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5090 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5091 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5092 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5093 /* I/O should be retried after a second if no I/O path was found but 5094 * any I/O path may become available. 5095 */ 5096 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5097 5098 /* Namespace should be unfreezed after completing to update its ANA state. */ 5099 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5100 poll_threads(); 5101 5102 CU_ASSERT(nvme_ns->ana_state_updating == false); 5103 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5104 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5105 5106 /* Retry the queued I/O should succeed. */ 5107 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5108 poll_threads(); 5109 5110 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5111 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5112 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5113 5114 free(bdev_io); 5115 5116 spdk_put_io_channel(ch); 5117 5118 poll_threads(); 5119 5120 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5121 CU_ASSERT(rc == 0); 5122 5123 poll_threads(); 5124 spdk_delay_us(1000); 5125 poll_threads(); 5126 5127 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5128 5129 g_opts.bdev_retry_count = 0; 5130 } 5131 5132 static void 5133 test_check_io_error_resiliency_params(void) 5134 { 5135 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5136 * 3rd parameter is fast_io_fail_timeout_sec. 5137 */ 5138 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5139 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5140 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5141 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5142 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5143 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5144 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5145 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5146 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5147 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5148 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5149 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5150 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5151 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5152 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5153 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5154 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5155 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5156 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5157 } 5158 5159 static void 5160 test_retry_io_if_ctrlr_is_resetting(void) 5161 { 5162 struct nvme_path_id path = {}; 5163 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5164 struct spdk_nvme_ctrlr *ctrlr; 5165 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5166 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5167 struct nvme_ctrlr *nvme_ctrlr; 5168 const int STRING_SIZE = 32; 5169 const char *attached_names[STRING_SIZE]; 5170 struct nvme_bdev *bdev; 5171 struct nvme_ns *nvme_ns; 5172 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5173 struct spdk_io_channel *ch; 5174 struct nvme_bdev_channel *nbdev_ch; 5175 struct nvme_io_path *io_path; 5176 struct nvme_qpair *nvme_qpair; 5177 int rc; 5178 5179 g_opts.bdev_retry_count = 1; 5180 5181 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5182 ut_init_trid(&path.trid); 5183 5184 set_thread(0); 5185 5186 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5187 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5188 5189 g_ut_attach_ctrlr_status = 0; 5190 g_ut_attach_bdev_count = 1; 5191 5192 opts.ctrlr_loss_timeout_sec = -1; 5193 opts.reconnect_delay_sec = 1; 5194 5195 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5196 attach_ctrlr_done, NULL, &dopts, &opts, false); 5197 CU_ASSERT(rc == 0); 5198 5199 spdk_delay_us(1000); 5200 poll_threads(); 5201 5202 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5203 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5204 5205 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5206 CU_ASSERT(nvme_ctrlr != NULL); 5207 5208 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5209 CU_ASSERT(bdev != NULL); 5210 5211 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5212 CU_ASSERT(nvme_ns != NULL); 5213 5214 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5215 ut_bdev_io_set_buf(bdev_io1); 5216 5217 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5218 ut_bdev_io_set_buf(bdev_io2); 5219 5220 ch = spdk_get_io_channel(bdev); 5221 SPDK_CU_ASSERT_FATAL(ch != NULL); 5222 5223 nbdev_ch = spdk_io_channel_get_ctx(ch); 5224 5225 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5226 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5227 5228 nvme_qpair = io_path->qpair; 5229 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5230 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5231 5232 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5233 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5234 5235 /* If qpair is connected, I/O should succeed. */ 5236 bdev_io1->internal.in_submit_request = true; 5237 5238 bdev_nvme_submit_request(ch, bdev_io1); 5239 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5240 5241 poll_threads(); 5242 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5243 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5244 5245 /* If qpair is disconnected, it is freed and then reconnected via resetting 5246 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5247 * while resetting the nvme_ctrlr. 5248 */ 5249 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5250 ctrlr->is_failed = true; 5251 5252 poll_thread_times(0, 5); 5253 5254 CU_ASSERT(nvme_qpair->qpair == NULL); 5255 CU_ASSERT(nvme_ctrlr->resetting == true); 5256 CU_ASSERT(ctrlr->is_failed == false); 5257 5258 bdev_io1->internal.in_submit_request = true; 5259 5260 bdev_nvme_submit_request(ch, bdev_io1); 5261 5262 spdk_delay_us(1); 5263 5264 bdev_io2->internal.in_submit_request = true; 5265 5266 bdev_nvme_submit_request(ch, bdev_io2); 5267 5268 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5269 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5270 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5271 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx( 5272 TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx, 5273 retry_link))); 5274 5275 poll_threads(); 5276 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5277 poll_threads(); 5278 5279 CU_ASSERT(nvme_qpair->qpair != NULL); 5280 CU_ASSERT(nvme_ctrlr->resetting == false); 5281 5282 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5283 5284 poll_thread_times(0, 1); 5285 5286 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5287 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5288 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5289 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5290 5291 poll_threads(); 5292 5293 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5294 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5295 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5296 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5297 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5298 5299 spdk_delay_us(1); 5300 5301 poll_thread_times(0, 1); 5302 5303 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5304 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5305 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5306 5307 poll_threads(); 5308 5309 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5310 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5311 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5312 5313 free(bdev_io1); 5314 free(bdev_io2); 5315 5316 spdk_put_io_channel(ch); 5317 5318 poll_threads(); 5319 5320 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5321 CU_ASSERT(rc == 0); 5322 5323 poll_threads(); 5324 spdk_delay_us(1000); 5325 poll_threads(); 5326 5327 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5328 5329 g_opts.bdev_retry_count = 0; 5330 } 5331 5332 static void 5333 test_reconnect_ctrlr(void) 5334 { 5335 struct spdk_nvme_transport_id trid = {}; 5336 struct spdk_nvme_ctrlr ctrlr = {}; 5337 struct nvme_ctrlr *nvme_ctrlr; 5338 struct spdk_io_channel *ch1, *ch2; 5339 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5340 int rc; 5341 5342 ut_init_trid(&trid); 5343 TAILQ_INIT(&ctrlr.active_io_qpairs); 5344 5345 set_thread(0); 5346 5347 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5348 CU_ASSERT(rc == 0); 5349 5350 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5351 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5352 5353 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5354 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5355 5356 ch1 = spdk_get_io_channel(nvme_ctrlr); 5357 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5358 5359 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5360 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5361 5362 set_thread(1); 5363 5364 ch2 = spdk_get_io_channel(nvme_ctrlr); 5365 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5366 5367 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5368 5369 /* Reset starts from thread 1. */ 5370 set_thread(1); 5371 5372 /* The reset should fail and a reconnect timer should be registered. */ 5373 ctrlr.fail_reset = true; 5374 ctrlr.is_failed = true; 5375 5376 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5377 CU_ASSERT(rc == 0); 5378 CU_ASSERT(nvme_ctrlr->resetting == true); 5379 CU_ASSERT(ctrlr.is_failed == true); 5380 5381 poll_threads(); 5382 5383 CU_ASSERT(nvme_ctrlr->resetting == false); 5384 CU_ASSERT(ctrlr.is_failed == false); 5385 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5386 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5387 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5388 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5389 5390 /* A new reset starts from thread 0. */ 5391 set_thread(1); 5392 5393 /* The reset should cancel the reconnect timer and should start from reconnection. 5394 * Then, the reset should fail and a reconnect timer should be registered again. 5395 */ 5396 ctrlr.fail_reset = true; 5397 ctrlr.is_failed = true; 5398 5399 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5400 CU_ASSERT(rc == 0); 5401 CU_ASSERT(nvme_ctrlr->resetting == true); 5402 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5403 CU_ASSERT(ctrlr.is_failed == true); 5404 5405 poll_threads(); 5406 5407 CU_ASSERT(nvme_ctrlr->resetting == false); 5408 CU_ASSERT(ctrlr.is_failed == false); 5409 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5410 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5411 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5412 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5413 5414 /* Then a reconnect retry should suceeed. */ 5415 ctrlr.fail_reset = false; 5416 5417 spdk_delay_us(SPDK_SEC_TO_USEC); 5418 poll_thread_times(0, 1); 5419 5420 CU_ASSERT(nvme_ctrlr->resetting == true); 5421 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5422 5423 poll_threads(); 5424 5425 CU_ASSERT(nvme_ctrlr->resetting == false); 5426 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5427 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5428 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5429 5430 /* The reset should fail and a reconnect timer should be registered. */ 5431 ctrlr.fail_reset = true; 5432 ctrlr.is_failed = true; 5433 5434 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5435 CU_ASSERT(rc == 0); 5436 CU_ASSERT(nvme_ctrlr->resetting == true); 5437 CU_ASSERT(ctrlr.is_failed == true); 5438 5439 poll_threads(); 5440 5441 CU_ASSERT(nvme_ctrlr->resetting == false); 5442 CU_ASSERT(ctrlr.is_failed == false); 5443 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5444 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5445 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5446 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5447 5448 /* Then a reconnect retry should still fail. */ 5449 spdk_delay_us(SPDK_SEC_TO_USEC); 5450 poll_thread_times(0, 1); 5451 5452 CU_ASSERT(nvme_ctrlr->resetting == true); 5453 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5454 5455 poll_threads(); 5456 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5457 poll_threads(); 5458 5459 CU_ASSERT(nvme_ctrlr->resetting == false); 5460 CU_ASSERT(ctrlr.is_failed == false); 5461 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5462 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5463 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5464 5465 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5466 spdk_delay_us(SPDK_SEC_TO_USEC); 5467 poll_threads(); 5468 5469 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5470 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5471 CU_ASSERT(nvme_ctrlr->destruct == true); 5472 5473 spdk_put_io_channel(ch2); 5474 5475 set_thread(0); 5476 5477 spdk_put_io_channel(ch1); 5478 5479 poll_threads(); 5480 spdk_delay_us(1000); 5481 poll_threads(); 5482 5483 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5484 } 5485 5486 static struct nvme_path_id * 5487 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5488 const struct spdk_nvme_transport_id *trid) 5489 { 5490 struct nvme_path_id *p; 5491 5492 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5493 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5494 break; 5495 } 5496 } 5497 5498 return p; 5499 } 5500 5501 static void 5502 test_retry_failover_ctrlr(void) 5503 { 5504 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5505 struct spdk_nvme_ctrlr ctrlr = {}; 5506 struct nvme_ctrlr *nvme_ctrlr = NULL; 5507 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5508 struct spdk_io_channel *ch; 5509 struct nvme_ctrlr_channel *ctrlr_ch; 5510 int rc; 5511 5512 ut_init_trid(&trid1); 5513 ut_init_trid2(&trid2); 5514 ut_init_trid3(&trid3); 5515 TAILQ_INIT(&ctrlr.active_io_qpairs); 5516 5517 set_thread(0); 5518 5519 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5520 CU_ASSERT(rc == 0); 5521 5522 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5523 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5524 5525 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5526 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5527 5528 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5529 CU_ASSERT(rc == 0); 5530 5531 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5532 CU_ASSERT(rc == 0); 5533 5534 ch = spdk_get_io_channel(nvme_ctrlr); 5535 SPDK_CU_ASSERT_FATAL(ch != NULL); 5536 5537 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5538 5539 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5540 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5541 CU_ASSERT(path_id1->last_failed_tsc == 0); 5542 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5543 5544 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5545 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5546 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5547 5548 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5549 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5550 5551 /* It is expected that connecting both of trid1, trid2, and trid3 fail, 5552 * and a reconnect timer is started. */ 5553 ctrlr.fail_reset = true; 5554 ctrlr.is_failed = true; 5555 5556 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5557 CU_ASSERT(rc == 0); 5558 5559 poll_threads(); 5560 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5561 poll_threads(); 5562 5563 CU_ASSERT(nvme_ctrlr->resetting == false); 5564 CU_ASSERT(ctrlr.is_failed == false); 5565 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5566 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5567 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5568 CU_ASSERT(path_id1->last_failed_tsc != 0); 5569 5570 CU_ASSERT(path_id2->last_failed_tsc != 0); 5571 CU_ASSERT(path_id3->last_failed_tsc != 0); 5572 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5573 5574 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5575 * switched to trid2 but reset is not started. 5576 */ 5577 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5578 CU_ASSERT(rc == -EALREADY); 5579 5580 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5581 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5582 5583 CU_ASSERT(nvme_ctrlr->resetting == false); 5584 5585 /* If reconnect succeeds, trid2 should be the active path_id */ 5586 ctrlr.fail_reset = false; 5587 5588 spdk_delay_us(SPDK_SEC_TO_USEC); 5589 poll_thread_times(0, 1); 5590 5591 CU_ASSERT(nvme_ctrlr->resetting == true); 5592 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5593 5594 poll_threads(); 5595 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5596 poll_threads(); 5597 5598 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5599 CU_ASSERT(path_id2->last_failed_tsc == 0); 5600 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5601 CU_ASSERT(nvme_ctrlr->resetting == false); 5602 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5603 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5604 5605 spdk_put_io_channel(ch); 5606 5607 poll_threads(); 5608 5609 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5610 CU_ASSERT(rc == 0); 5611 5612 poll_threads(); 5613 spdk_delay_us(1000); 5614 poll_threads(); 5615 5616 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5617 } 5618 5619 static void 5620 test_fail_path(void) 5621 { 5622 struct nvme_path_id path = {}; 5623 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5624 struct spdk_nvme_ctrlr *ctrlr; 5625 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5626 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5627 struct nvme_ctrlr *nvme_ctrlr; 5628 const int STRING_SIZE = 32; 5629 const char *attached_names[STRING_SIZE]; 5630 struct nvme_bdev *bdev; 5631 struct nvme_ns *nvme_ns; 5632 struct spdk_bdev_io *bdev_io; 5633 struct spdk_io_channel *ch; 5634 struct nvme_bdev_channel *nbdev_ch; 5635 struct nvme_io_path *io_path; 5636 struct nvme_ctrlr_channel *ctrlr_ch; 5637 int rc; 5638 5639 /* The test scenario is the following. 5640 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5641 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5642 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5643 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5644 * comes first. The queued I/O is failed. 5645 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5646 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5647 */ 5648 5649 g_opts.bdev_retry_count = 1; 5650 5651 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5652 ut_init_trid(&path.trid); 5653 5654 set_thread(0); 5655 5656 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5657 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5658 5659 g_ut_attach_ctrlr_status = 0; 5660 g_ut_attach_bdev_count = 1; 5661 5662 opts.ctrlr_loss_timeout_sec = 4; 5663 opts.reconnect_delay_sec = 1; 5664 opts.fast_io_fail_timeout_sec = 2; 5665 5666 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5667 attach_ctrlr_done, NULL, &dopts, &opts, false); 5668 CU_ASSERT(rc == 0); 5669 5670 spdk_delay_us(1000); 5671 poll_threads(); 5672 5673 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5674 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5675 5676 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5677 CU_ASSERT(nvme_ctrlr != NULL); 5678 5679 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5680 CU_ASSERT(bdev != NULL); 5681 5682 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5683 CU_ASSERT(nvme_ns != NULL); 5684 5685 ch = spdk_get_io_channel(bdev); 5686 SPDK_CU_ASSERT_FATAL(ch != NULL); 5687 5688 nbdev_ch = spdk_io_channel_get_ctx(ch); 5689 5690 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5691 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5692 5693 ctrlr_ch = io_path->qpair->ctrlr_ch; 5694 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5695 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5696 5697 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5698 ut_bdev_io_set_buf(bdev_io); 5699 5700 5701 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5702 ctrlr->fail_reset = true; 5703 ctrlr->is_failed = true; 5704 5705 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5706 CU_ASSERT(rc == 0); 5707 CU_ASSERT(nvme_ctrlr->resetting == true); 5708 CU_ASSERT(ctrlr->is_failed == true); 5709 5710 poll_threads(); 5711 5712 CU_ASSERT(nvme_ctrlr->resetting == false); 5713 CU_ASSERT(ctrlr->is_failed == false); 5714 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5715 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5716 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5717 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5718 5719 /* I/O should be queued. */ 5720 bdev_io->internal.in_submit_request = true; 5721 5722 bdev_nvme_submit_request(ch, bdev_io); 5723 5724 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5725 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5726 5727 /* After a second, the I/O should be still queued and the ctrlr should be 5728 * still recovering. 5729 */ 5730 spdk_delay_us(SPDK_SEC_TO_USEC); 5731 poll_threads(); 5732 5733 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5734 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5735 5736 CU_ASSERT(nvme_ctrlr->resetting == false); 5737 CU_ASSERT(ctrlr->is_failed == false); 5738 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5739 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5740 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5741 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5742 5743 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5744 5745 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5746 spdk_delay_us(SPDK_SEC_TO_USEC); 5747 poll_threads(); 5748 5749 CU_ASSERT(nvme_ctrlr->resetting == false); 5750 CU_ASSERT(ctrlr->is_failed == false); 5751 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5752 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5753 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5754 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5755 5756 /* Then within a second, pending I/O should be failed. */ 5757 spdk_delay_us(SPDK_SEC_TO_USEC); 5758 poll_threads(); 5759 5760 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5761 poll_threads(); 5762 5763 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5764 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5765 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5766 5767 /* Another I/O submission should be failed immediately. */ 5768 bdev_io->internal.in_submit_request = true; 5769 5770 bdev_nvme_submit_request(ch, bdev_io); 5771 5772 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5773 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5774 5775 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5776 * be deleted. 5777 */ 5778 spdk_delay_us(SPDK_SEC_TO_USEC); 5779 poll_threads(); 5780 5781 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5782 poll_threads(); 5783 5784 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5785 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5786 CU_ASSERT(nvme_ctrlr->destruct == true); 5787 5788 spdk_put_io_channel(ch); 5789 5790 poll_threads(); 5791 spdk_delay_us(1000); 5792 poll_threads(); 5793 5794 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5795 5796 free(bdev_io); 5797 5798 g_opts.bdev_retry_count = 0; 5799 } 5800 5801 static void 5802 test_nvme_ns_cmp(void) 5803 { 5804 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5805 5806 nvme_ns1.id = 0; 5807 nvme_ns2.id = UINT32_MAX; 5808 5809 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5810 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5811 } 5812 5813 static void 5814 test_ana_transition(void) 5815 { 5816 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5817 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5818 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5819 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5820 5821 /* case 1: ANA transition timedout is canceled. */ 5822 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5823 nvme_ns.ana_transition_timedout = true; 5824 5825 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5826 5827 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5828 5829 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5830 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5831 5832 /* case 2: ANATT timer is kept. */ 5833 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5834 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5835 &nvme_ns, 5836 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5837 5838 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5839 5840 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5841 5842 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5843 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5844 5845 /* case 3: ANATT timer is stopped. */ 5846 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5847 5848 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5849 5850 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5851 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5852 5853 /* ANATT timer is started. */ 5854 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5855 5856 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5857 5858 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5859 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5860 5861 /* ANATT timer is expired. */ 5862 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5863 5864 poll_threads(); 5865 5866 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5867 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5868 } 5869 5870 static void 5871 _set_preferred_path_cb(void *cb_arg, int rc) 5872 { 5873 bool *done = cb_arg; 5874 5875 *done = true; 5876 } 5877 5878 static void 5879 test_set_preferred_path(void) 5880 { 5881 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5882 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5883 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 5884 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5885 const int STRING_SIZE = 32; 5886 const char *attached_names[STRING_SIZE]; 5887 struct nvme_bdev *bdev; 5888 struct spdk_io_channel *ch; 5889 struct nvme_bdev_channel *nbdev_ch; 5890 struct nvme_io_path *io_path; 5891 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5892 const struct spdk_nvme_ctrlr_data *cdata; 5893 bool done; 5894 int rc; 5895 5896 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5897 ut_init_trid(&path1.trid); 5898 ut_init_trid2(&path2.trid); 5899 ut_init_trid3(&path3.trid); 5900 g_ut_attach_ctrlr_status = 0; 5901 g_ut_attach_bdev_count = 1; 5902 5903 set_thread(0); 5904 5905 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5906 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5907 5908 ctrlr1->ns[0].uuid = &uuid1; 5909 5910 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5911 attach_ctrlr_done, NULL, &opts, NULL, true); 5912 CU_ASSERT(rc == 0); 5913 5914 spdk_delay_us(1000); 5915 poll_threads(); 5916 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5917 poll_threads(); 5918 5919 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5920 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5921 5922 ctrlr2->ns[0].uuid = &uuid1; 5923 5924 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5925 attach_ctrlr_done, NULL, &opts, NULL, true); 5926 CU_ASSERT(rc == 0); 5927 5928 spdk_delay_us(1000); 5929 poll_threads(); 5930 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5931 poll_threads(); 5932 5933 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5934 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5935 5936 ctrlr3->ns[0].uuid = &uuid1; 5937 5938 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5939 attach_ctrlr_done, NULL, &opts, NULL, true); 5940 CU_ASSERT(rc == 0); 5941 5942 spdk_delay_us(1000); 5943 poll_threads(); 5944 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5945 poll_threads(); 5946 5947 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5948 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5949 5950 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5951 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5952 5953 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5954 5955 ch = spdk_get_io_channel(bdev); 5956 SPDK_CU_ASSERT_FATAL(ch != NULL); 5957 nbdev_ch = spdk_io_channel_get_ctx(ch); 5958 5959 io_path = bdev_nvme_find_io_path(nbdev_ch); 5960 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5961 5962 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5963 5964 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5965 * should return io_path to ctrlr2. 5966 */ 5967 5968 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5969 done = false; 5970 5971 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5972 5973 poll_threads(); 5974 CU_ASSERT(done == true); 5975 5976 io_path = bdev_nvme_find_io_path(nbdev_ch); 5977 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5978 5979 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5980 5981 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5982 * acquired, find_io_path() should return io_path to ctrlr3. 5983 */ 5984 5985 spdk_put_io_channel(ch); 5986 5987 poll_threads(); 5988 5989 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5990 done = false; 5991 5992 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5993 5994 poll_threads(); 5995 CU_ASSERT(done == true); 5996 5997 ch = spdk_get_io_channel(bdev); 5998 SPDK_CU_ASSERT_FATAL(ch != NULL); 5999 nbdev_ch = spdk_io_channel_get_ctx(ch); 6000 6001 io_path = bdev_nvme_find_io_path(nbdev_ch); 6002 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6003 6004 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 6005 6006 spdk_put_io_channel(ch); 6007 6008 poll_threads(); 6009 6010 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6011 CU_ASSERT(rc == 0); 6012 6013 poll_threads(); 6014 spdk_delay_us(1000); 6015 poll_threads(); 6016 6017 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6018 } 6019 6020 static void 6021 test_find_next_io_path(void) 6022 { 6023 struct nvme_bdev_channel nbdev_ch = { 6024 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6025 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6026 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 6027 }; 6028 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6029 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6030 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6031 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6032 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6033 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6034 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6035 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6036 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6037 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6038 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6039 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6040 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6041 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6042 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6043 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6044 6045 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6046 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6047 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6048 6049 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6050 * is covered in test_find_io_path. 6051 */ 6052 6053 nbdev_ch.current_io_path = &io_path2; 6054 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6055 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6056 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6057 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6058 6059 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6060 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6061 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6062 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6063 6064 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6065 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6066 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6067 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6068 6069 nbdev_ch.current_io_path = &io_path3; 6070 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6071 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6072 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6073 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6074 6075 /* Test if next io_path is selected according to rr_min_io */ 6076 6077 nbdev_ch.current_io_path = NULL; 6078 nbdev_ch.rr_min_io = 2; 6079 nbdev_ch.rr_counter = 0; 6080 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6081 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6082 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6083 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6084 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6085 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6086 6087 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6088 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6089 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6090 } 6091 6092 static void 6093 test_find_io_path_min_qd(void) 6094 { 6095 struct nvme_bdev_channel nbdev_ch = { 6096 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6097 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6098 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6099 }; 6100 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6101 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6102 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6103 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6104 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6105 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6106 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6107 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6108 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6109 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6110 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6111 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6112 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6113 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6114 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6115 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6116 6117 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6118 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6119 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6120 6121 /* Test if the minimum io_outstanding or the ANA optimized state is 6122 * prioritized when using least queue depth selector 6123 */ 6124 qpair1.num_outstanding_reqs = 2; 6125 qpair2.num_outstanding_reqs = 1; 6126 qpair3.num_outstanding_reqs = 0; 6127 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6128 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6129 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6130 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6131 6132 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6133 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6134 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6135 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6136 6137 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6138 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6139 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6140 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6141 6142 qpair2.num_outstanding_reqs = 4; 6143 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6144 } 6145 6146 static void 6147 test_disable_auto_failback(void) 6148 { 6149 struct nvme_path_id path1 = {}, path2 = {}; 6150 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 6151 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6152 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6153 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6154 struct nvme_ctrlr *nvme_ctrlr1; 6155 const int STRING_SIZE = 32; 6156 const char *attached_names[STRING_SIZE]; 6157 struct nvme_bdev *bdev; 6158 struct spdk_io_channel *ch; 6159 struct nvme_bdev_channel *nbdev_ch; 6160 struct nvme_io_path *io_path; 6161 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6162 const struct spdk_nvme_ctrlr_data *cdata; 6163 bool done; 6164 int rc; 6165 6166 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6167 ut_init_trid(&path1.trid); 6168 ut_init_trid2(&path2.trid); 6169 g_ut_attach_ctrlr_status = 0; 6170 g_ut_attach_bdev_count = 1; 6171 6172 g_opts.disable_auto_failback = true; 6173 6174 opts.ctrlr_loss_timeout_sec = -1; 6175 opts.reconnect_delay_sec = 1; 6176 6177 set_thread(0); 6178 6179 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6180 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6181 6182 ctrlr1->ns[0].uuid = &uuid1; 6183 6184 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6185 attach_ctrlr_done, NULL, &dopts, &opts, true); 6186 CU_ASSERT(rc == 0); 6187 6188 spdk_delay_us(1000); 6189 poll_threads(); 6190 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6191 poll_threads(); 6192 6193 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6194 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6195 6196 ctrlr2->ns[0].uuid = &uuid1; 6197 6198 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6199 attach_ctrlr_done, NULL, &dopts, &opts, true); 6200 CU_ASSERT(rc == 0); 6201 6202 spdk_delay_us(1000); 6203 poll_threads(); 6204 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6205 poll_threads(); 6206 6207 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6208 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6209 6210 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6211 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6212 6213 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn); 6214 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6215 6216 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6217 6218 ch = spdk_get_io_channel(bdev); 6219 SPDK_CU_ASSERT_FATAL(ch != NULL); 6220 nbdev_ch = spdk_io_channel_get_ctx(ch); 6221 6222 io_path = bdev_nvme_find_io_path(nbdev_ch); 6223 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6224 6225 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6226 6227 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6228 ctrlr1->fail_reset = true; 6229 ctrlr1->is_failed = true; 6230 6231 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6232 6233 poll_threads(); 6234 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6235 poll_threads(); 6236 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6237 poll_threads(); 6238 6239 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6240 6241 io_path = bdev_nvme_find_io_path(nbdev_ch); 6242 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6243 6244 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6245 6246 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6247 * Hence, io_path to ctrlr2 should still be used. 6248 */ 6249 ctrlr1->fail_reset = false; 6250 6251 spdk_delay_us(SPDK_SEC_TO_USEC); 6252 poll_threads(); 6253 6254 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6255 6256 io_path = bdev_nvme_find_io_path(nbdev_ch); 6257 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6258 6259 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6260 6261 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6262 * be used again. 6263 */ 6264 6265 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6266 done = false; 6267 6268 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6269 6270 poll_threads(); 6271 CU_ASSERT(done == true); 6272 6273 io_path = bdev_nvme_find_io_path(nbdev_ch); 6274 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6275 6276 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6277 6278 spdk_put_io_channel(ch); 6279 6280 poll_threads(); 6281 6282 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6283 CU_ASSERT(rc == 0); 6284 6285 poll_threads(); 6286 spdk_delay_us(1000); 6287 poll_threads(); 6288 6289 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6290 6291 g_opts.disable_auto_failback = false; 6292 } 6293 6294 static void 6295 ut_set_multipath_policy_done(void *cb_arg, int rc) 6296 { 6297 int *done = cb_arg; 6298 6299 SPDK_CU_ASSERT_FATAL(done != NULL); 6300 *done = rc; 6301 } 6302 6303 static void 6304 test_set_multipath_policy(void) 6305 { 6306 struct nvme_path_id path1 = {}, path2 = {}; 6307 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 6308 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6309 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6310 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6311 const int STRING_SIZE = 32; 6312 const char *attached_names[STRING_SIZE]; 6313 struct nvme_bdev *bdev; 6314 struct spdk_io_channel *ch; 6315 struct nvme_bdev_channel *nbdev_ch; 6316 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6317 int done; 6318 int rc; 6319 6320 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6321 ut_init_trid(&path1.trid); 6322 ut_init_trid2(&path2.trid); 6323 g_ut_attach_ctrlr_status = 0; 6324 g_ut_attach_bdev_count = 1; 6325 6326 g_opts.disable_auto_failback = true; 6327 6328 opts.ctrlr_loss_timeout_sec = -1; 6329 opts.reconnect_delay_sec = 1; 6330 6331 set_thread(0); 6332 6333 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6334 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6335 6336 ctrlr1->ns[0].uuid = &uuid1; 6337 6338 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6339 attach_ctrlr_done, NULL, &dopts, &opts, true); 6340 CU_ASSERT(rc == 0); 6341 6342 spdk_delay_us(1000); 6343 poll_threads(); 6344 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6345 poll_threads(); 6346 6347 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6348 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6349 6350 ctrlr2->ns[0].uuid = &uuid1; 6351 6352 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6353 attach_ctrlr_done, NULL, &dopts, &opts, true); 6354 CU_ASSERT(rc == 0); 6355 6356 spdk_delay_us(1000); 6357 poll_threads(); 6358 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6359 poll_threads(); 6360 6361 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6362 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6363 6364 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6365 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6366 6367 /* If multipath policy is updated before getting any I/O channel, 6368 * an new I/O channel should have the update. 6369 */ 6370 done = -1; 6371 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6372 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6373 ut_set_multipath_policy_done, &done); 6374 poll_threads(); 6375 CU_ASSERT(done == 0); 6376 6377 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6378 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6379 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6380 6381 ch = spdk_get_io_channel(bdev); 6382 SPDK_CU_ASSERT_FATAL(ch != NULL); 6383 nbdev_ch = spdk_io_channel_get_ctx(ch); 6384 6385 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6386 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6387 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6388 6389 /* If multipath policy is updated while a I/O channel is active, 6390 * the update should be applied to the I/O channel immediately. 6391 */ 6392 done = -1; 6393 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6394 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6395 ut_set_multipath_policy_done, &done); 6396 poll_threads(); 6397 CU_ASSERT(done == 0); 6398 6399 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6400 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6401 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6402 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6403 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6404 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6405 6406 spdk_put_io_channel(ch); 6407 6408 poll_threads(); 6409 6410 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6411 CU_ASSERT(rc == 0); 6412 6413 poll_threads(); 6414 spdk_delay_us(1000); 6415 poll_threads(); 6416 6417 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6418 } 6419 6420 static void 6421 test_uuid_generation(void) 6422 { 6423 uint32_t nsid1 = 1, nsid2 = 2; 6424 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6425 char sn3[21] = " "; 6426 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6427 struct spdk_uuid uuid1, uuid2; 6428 int rc; 6429 6430 /* Test case 1: 6431 * Serial numbers are the same, nsids are different. 6432 * Compare two generated UUID - they should be different. */ 6433 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6434 CU_ASSERT(rc == 0); 6435 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6436 CU_ASSERT(rc == 0); 6437 6438 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6439 6440 /* Test case 2: 6441 * Serial numbers differ only by one character, nsids are the same. 6442 * Compare two generated UUID - they should be different. */ 6443 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6444 CU_ASSERT(rc == 0); 6445 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6446 CU_ASSERT(rc == 0); 6447 6448 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6449 6450 /* Test case 3: 6451 * Serial number comprises only of space characters. 6452 * Validate the generated UUID. */ 6453 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6454 CU_ASSERT(rc == 0); 6455 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6456 6457 } 6458 6459 static void 6460 test_retry_io_to_same_path(void) 6461 { 6462 struct nvme_path_id path1 = {}, path2 = {}; 6463 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6464 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 6465 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6466 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6467 const int STRING_SIZE = 32; 6468 const char *attached_names[STRING_SIZE]; 6469 struct nvme_bdev *bdev; 6470 struct spdk_bdev_io *bdev_io; 6471 struct nvme_bdev_io *bio; 6472 struct spdk_io_channel *ch; 6473 struct nvme_bdev_channel *nbdev_ch; 6474 struct nvme_io_path *io_path1, *io_path2; 6475 struct ut_nvme_req *req; 6476 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6477 int done; 6478 int rc; 6479 6480 g_opts.nvme_ioq_poll_period_us = 1; 6481 6482 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6483 ut_init_trid(&path1.trid); 6484 ut_init_trid2(&path2.trid); 6485 g_ut_attach_ctrlr_status = 0; 6486 g_ut_attach_bdev_count = 1; 6487 6488 set_thread(0); 6489 6490 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6491 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6492 6493 ctrlr1->ns[0].uuid = &uuid1; 6494 6495 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6496 attach_ctrlr_done, NULL, &opts, NULL, true); 6497 CU_ASSERT(rc == 0); 6498 6499 spdk_delay_us(1000); 6500 poll_threads(); 6501 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6502 poll_threads(); 6503 6504 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6505 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6506 6507 ctrlr2->ns[0].uuid = &uuid1; 6508 6509 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6510 attach_ctrlr_done, NULL, &opts, NULL, true); 6511 CU_ASSERT(rc == 0); 6512 6513 spdk_delay_us(1000); 6514 poll_threads(); 6515 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6516 poll_threads(); 6517 6518 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6519 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6520 6521 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 6522 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6523 6524 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 6525 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6526 6527 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6528 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6529 6530 done = -1; 6531 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6532 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6533 poll_threads(); 6534 CU_ASSERT(done == 0); 6535 6536 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6537 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6538 CU_ASSERT(bdev->rr_min_io == 1); 6539 6540 ch = spdk_get_io_channel(bdev); 6541 SPDK_CU_ASSERT_FATAL(ch != NULL); 6542 nbdev_ch = spdk_io_channel_get_ctx(ch); 6543 6544 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6545 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6546 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6547 6548 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6549 ut_bdev_io_set_buf(bdev_io); 6550 6551 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6552 6553 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6554 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6555 6556 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6557 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6558 6559 /* The 1st I/O should be submitted to io_path1. */ 6560 bdev_io->internal.in_submit_request = true; 6561 6562 bdev_nvme_submit_request(ch, bdev_io); 6563 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6564 CU_ASSERT(bio->io_path == io_path1); 6565 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6566 6567 spdk_delay_us(1); 6568 6569 poll_threads(); 6570 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6571 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6572 6573 /* The 2nd I/O should be submitted to io_path2 because the path selection 6574 * policy is round-robin. 6575 */ 6576 bdev_io->internal.in_submit_request = true; 6577 6578 bdev_nvme_submit_request(ch, bdev_io); 6579 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6580 CU_ASSERT(bio->io_path == io_path2); 6581 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6582 6583 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6584 SPDK_CU_ASSERT_FATAL(req != NULL); 6585 6586 /* Set retry count to non-zero. */ 6587 g_opts.bdev_retry_count = 2; 6588 6589 /* Inject an I/O error. */ 6590 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6591 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6592 6593 /* The 2nd I/O should be queued to nbdev_ch. */ 6594 spdk_delay_us(1); 6595 poll_thread_times(0, 1); 6596 6597 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6598 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6599 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6600 6601 /* The 2nd I/O should keep caching io_path2. */ 6602 CU_ASSERT(bio->io_path == io_path2); 6603 6604 /* The 2nd I/O should be submitted to io_path2 again. */ 6605 poll_thread_times(0, 1); 6606 6607 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6608 CU_ASSERT(bio->io_path == io_path2); 6609 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6610 6611 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6612 SPDK_CU_ASSERT_FATAL(req != NULL); 6613 6614 /* Inject an I/O error again. */ 6615 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6616 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6617 req->cpl.status.crd = 1; 6618 6619 ctrlr2->cdata.crdt[1] = 1; 6620 6621 /* The 2nd I/O should be queued to nbdev_ch. */ 6622 spdk_delay_us(1); 6623 poll_thread_times(0, 1); 6624 6625 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6626 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6627 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6628 6629 /* The 2nd I/O should keep caching io_path2. */ 6630 CU_ASSERT(bio->io_path == io_path2); 6631 6632 /* Detach ctrlr2 dynamically. */ 6633 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6634 CU_ASSERT(rc == 0); 6635 6636 spdk_delay_us(1000); 6637 poll_threads(); 6638 spdk_delay_us(1000); 6639 poll_threads(); 6640 spdk_delay_us(1000); 6641 poll_threads(); 6642 spdk_delay_us(1000); 6643 poll_threads(); 6644 6645 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 6646 6647 poll_threads(); 6648 spdk_delay_us(100000); 6649 poll_threads(); 6650 spdk_delay_us(1); 6651 poll_threads(); 6652 6653 /* The 2nd I/O should succeed by io_path1. */ 6654 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6655 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6656 CU_ASSERT(bio->io_path == io_path1); 6657 6658 free(bdev_io); 6659 6660 spdk_put_io_channel(ch); 6661 6662 poll_threads(); 6663 spdk_delay_us(1); 6664 poll_threads(); 6665 6666 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6667 CU_ASSERT(rc == 0); 6668 6669 poll_threads(); 6670 spdk_delay_us(1000); 6671 poll_threads(); 6672 6673 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6674 6675 g_opts.nvme_ioq_poll_period_us = 0; 6676 g_opts.bdev_retry_count = 0; 6677 } 6678 6679 /* This case is to verify a fix for a complex race condition that 6680 * failover is lost if fabric connect command gets timeout while 6681 * controller is being reset. 6682 */ 6683 static void 6684 test_race_between_reset_and_disconnected(void) 6685 { 6686 struct spdk_nvme_transport_id trid = {}; 6687 struct spdk_nvme_ctrlr ctrlr = {}; 6688 struct nvme_ctrlr *nvme_ctrlr = NULL; 6689 struct nvme_path_id *curr_trid; 6690 struct spdk_io_channel *ch1, *ch2; 6691 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6692 int rc; 6693 6694 ut_init_trid(&trid); 6695 TAILQ_INIT(&ctrlr.active_io_qpairs); 6696 6697 set_thread(0); 6698 6699 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6700 CU_ASSERT(rc == 0); 6701 6702 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6703 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6704 6705 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6706 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6707 6708 ch1 = spdk_get_io_channel(nvme_ctrlr); 6709 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6710 6711 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6712 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6713 6714 set_thread(1); 6715 6716 ch2 = spdk_get_io_channel(nvme_ctrlr); 6717 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6718 6719 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6720 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6721 6722 /* Reset starts from thread 1. */ 6723 set_thread(1); 6724 6725 nvme_ctrlr->resetting = false; 6726 curr_trid->last_failed_tsc = spdk_get_ticks(); 6727 ctrlr.is_failed = true; 6728 6729 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6730 CU_ASSERT(rc == 0); 6731 CU_ASSERT(nvme_ctrlr->resetting == true); 6732 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6733 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6734 6735 poll_thread_times(0, 3); 6736 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6737 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6738 6739 poll_thread_times(0, 1); 6740 poll_thread_times(1, 1); 6741 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6742 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6743 CU_ASSERT(ctrlr.is_failed == true); 6744 6745 poll_thread_times(1, 1); 6746 poll_thread_times(0, 1); 6747 CU_ASSERT(ctrlr.is_failed == false); 6748 CU_ASSERT(ctrlr.adminq.is_connected == false); 6749 6750 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6751 poll_thread_times(0, 2); 6752 CU_ASSERT(ctrlr.adminq.is_connected == true); 6753 6754 poll_thread_times(0, 1); 6755 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6756 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6757 6758 poll_thread_times(1, 1); 6759 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6760 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6761 CU_ASSERT(nvme_ctrlr->resetting == true); 6762 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6763 6764 poll_thread_times(0, 2); 6765 CU_ASSERT(nvme_ctrlr->resetting == true); 6766 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6767 poll_thread_times(1, 1); 6768 CU_ASSERT(nvme_ctrlr->resetting == true); 6769 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6770 6771 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6772 * 6773 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric 6774 * connect command is executed. If fabric connect command gets timeout, 6775 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6776 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6777 * 6778 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 6779 */ 6780 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6781 CU_ASSERT(rc == -EINPROGRESS); 6782 CU_ASSERT(nvme_ctrlr->resetting == true); 6783 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6784 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6785 6786 poll_thread_times(0, 1); 6787 6788 CU_ASSERT(nvme_ctrlr->resetting == true); 6789 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6790 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6791 6792 poll_threads(); 6793 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6794 poll_threads(); 6795 6796 CU_ASSERT(nvme_ctrlr->resetting == false); 6797 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6798 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6799 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6800 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6801 6802 spdk_put_io_channel(ch2); 6803 6804 set_thread(0); 6805 6806 spdk_put_io_channel(ch1); 6807 6808 poll_threads(); 6809 6810 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6811 CU_ASSERT(rc == 0); 6812 6813 poll_threads(); 6814 spdk_delay_us(1000); 6815 poll_threads(); 6816 6817 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6818 } 6819 static void 6820 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6821 { 6822 int *_rc = (int *)cb_arg; 6823 6824 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6825 *_rc = rc; 6826 } 6827 6828 static void 6829 test_ctrlr_op_rpc(void) 6830 { 6831 struct spdk_nvme_transport_id trid = {}; 6832 struct spdk_nvme_ctrlr ctrlr = {}; 6833 struct nvme_ctrlr *nvme_ctrlr = NULL; 6834 struct nvme_path_id *curr_trid; 6835 struct spdk_io_channel *ch1, *ch2; 6836 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6837 int ctrlr_op_rc; 6838 int rc; 6839 6840 ut_init_trid(&trid); 6841 TAILQ_INIT(&ctrlr.active_io_qpairs); 6842 6843 set_thread(0); 6844 6845 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6846 CU_ASSERT(rc == 0); 6847 6848 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6849 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6850 6851 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6852 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6853 6854 ch1 = spdk_get_io_channel(nvme_ctrlr); 6855 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6856 6857 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6858 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6859 6860 set_thread(1); 6861 6862 ch2 = spdk_get_io_channel(nvme_ctrlr); 6863 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6864 6865 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6866 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6867 6868 /* Reset starts from thread 1. */ 6869 set_thread(1); 6870 6871 /* Case 1: ctrlr is already being destructed. */ 6872 nvme_ctrlr->destruct = true; 6873 ctrlr_op_rc = 0; 6874 6875 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6876 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6877 6878 poll_threads(); 6879 6880 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6881 6882 /* Case 2: reset is in progress. */ 6883 nvme_ctrlr->destruct = false; 6884 nvme_ctrlr->resetting = true; 6885 ctrlr_op_rc = 0; 6886 6887 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6888 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6889 6890 poll_threads(); 6891 6892 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6893 6894 /* Case 3: reset completes successfully. */ 6895 nvme_ctrlr->resetting = false; 6896 curr_trid->last_failed_tsc = spdk_get_ticks(); 6897 ctrlr.is_failed = true; 6898 ctrlr_op_rc = -1; 6899 6900 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6901 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6902 6903 CU_ASSERT(nvme_ctrlr->resetting == true); 6904 CU_ASSERT(ctrlr_op_rc == -1); 6905 6906 poll_threads(); 6907 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6908 poll_threads(); 6909 6910 CU_ASSERT(nvme_ctrlr->resetting == false); 6911 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6912 CU_ASSERT(ctrlr.is_failed == false); 6913 CU_ASSERT(ctrlr_op_rc == 0); 6914 6915 /* Case 4: invalid operation. */ 6916 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6917 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6918 6919 poll_threads(); 6920 6921 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6922 6923 spdk_put_io_channel(ch2); 6924 6925 set_thread(0); 6926 6927 spdk_put_io_channel(ch1); 6928 6929 poll_threads(); 6930 6931 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6932 CU_ASSERT(rc == 0); 6933 6934 poll_threads(); 6935 spdk_delay_us(1000); 6936 poll_threads(); 6937 6938 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6939 } 6940 6941 static void 6942 test_bdev_ctrlr_op_rpc(void) 6943 { 6944 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6945 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6946 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6947 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6948 struct nvme_path_id *curr_trid1, *curr_trid2; 6949 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6950 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6951 int ctrlr_op_rc; 6952 int rc; 6953 6954 ut_init_trid(&trid1); 6955 ut_init_trid2(&trid2); 6956 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6957 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6958 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6959 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6960 ctrlr1.cdata.cntlid = 1; 6961 ctrlr2.cdata.cntlid = 2; 6962 ctrlr1.adminq.is_connected = true; 6963 ctrlr2.adminq.is_connected = true; 6964 6965 set_thread(0); 6966 6967 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 6968 CU_ASSERT(rc == 0); 6969 6970 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6971 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6972 6973 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN); 6974 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6975 6976 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 6977 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 6978 6979 ch11 = spdk_get_io_channel(nvme_ctrlr1); 6980 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 6981 6982 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 6983 CU_ASSERT(ctrlr_ch11->qpair != NULL); 6984 6985 set_thread(1); 6986 6987 ch12 = spdk_get_io_channel(nvme_ctrlr1); 6988 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 6989 6990 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 6991 CU_ASSERT(ctrlr_ch12->qpair != NULL); 6992 6993 set_thread(0); 6994 6995 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 6996 CU_ASSERT(rc == 0); 6997 6998 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN); 6999 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 7000 7001 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 7002 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 7003 7004 ch21 = spdk_get_io_channel(nvme_ctrlr2); 7005 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 7006 7007 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 7008 CU_ASSERT(ctrlr_ch21->qpair != NULL); 7009 7010 set_thread(1); 7011 7012 ch22 = spdk_get_io_channel(nvme_ctrlr2); 7013 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 7014 7015 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 7016 CU_ASSERT(ctrlr_ch22->qpair != NULL); 7017 7018 /* Reset starts from thread 1. */ 7019 set_thread(1); 7020 7021 nvme_ctrlr1->resetting = false; 7022 nvme_ctrlr2->resetting = false; 7023 curr_trid1->last_failed_tsc = spdk_get_ticks(); 7024 curr_trid2->last_failed_tsc = spdk_get_ticks(); 7025 ctrlr_op_rc = -1; 7026 7027 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 7028 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 7029 7030 CU_ASSERT(nvme_ctrlr1->resetting == true); 7031 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7032 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7033 CU_ASSERT(nvme_ctrlr2->resetting == false); 7034 7035 poll_thread_times(0, 3); 7036 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7037 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7038 7039 poll_thread_times(0, 1); 7040 poll_thread_times(1, 1); 7041 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7042 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7043 7044 poll_thread_times(1, 1); 7045 poll_thread_times(0, 1); 7046 CU_ASSERT(ctrlr1.adminq.is_connected == false); 7047 7048 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7049 poll_thread_times(0, 2); 7050 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7051 7052 poll_thread_times(0, 1); 7053 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7054 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7055 7056 poll_thread_times(1, 1); 7057 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7058 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7059 CU_ASSERT(nvme_ctrlr1->resetting == true); 7060 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7061 7062 poll_thread_times(0, 2); 7063 poll_thread_times(1, 1); 7064 poll_thread_times(0, 1); 7065 poll_thread_times(1, 1); 7066 poll_thread_times(0, 1); 7067 poll_thread_times(1, 1); 7068 poll_thread_times(0, 1); 7069 7070 CU_ASSERT(nvme_ctrlr1->resetting == false); 7071 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7072 CU_ASSERT(nvme_ctrlr2->resetting == true); 7073 7074 poll_threads(); 7075 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7076 poll_threads(); 7077 7078 CU_ASSERT(nvme_ctrlr2->resetting == false); 7079 CU_ASSERT(ctrlr_op_rc == 0); 7080 7081 set_thread(1); 7082 7083 spdk_put_io_channel(ch12); 7084 spdk_put_io_channel(ch22); 7085 7086 set_thread(0); 7087 7088 spdk_put_io_channel(ch11); 7089 spdk_put_io_channel(ch21); 7090 7091 poll_threads(); 7092 7093 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7094 CU_ASSERT(rc == 0); 7095 7096 poll_threads(); 7097 spdk_delay_us(1000); 7098 poll_threads(); 7099 7100 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7101 } 7102 7103 static void 7104 test_disable_enable_ctrlr(void) 7105 { 7106 struct spdk_nvme_transport_id trid = {}; 7107 struct spdk_nvme_ctrlr ctrlr = {}; 7108 struct nvme_ctrlr *nvme_ctrlr = NULL; 7109 struct nvme_path_id *curr_trid; 7110 struct spdk_io_channel *ch1, *ch2; 7111 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7112 int rc; 7113 7114 ut_init_trid(&trid); 7115 TAILQ_INIT(&ctrlr.active_io_qpairs); 7116 ctrlr.adminq.is_connected = true; 7117 7118 set_thread(0); 7119 7120 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7121 CU_ASSERT(rc == 0); 7122 7123 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7124 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7125 7126 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7127 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7128 7129 ch1 = spdk_get_io_channel(nvme_ctrlr); 7130 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7131 7132 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7133 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7134 7135 set_thread(1); 7136 7137 ch2 = spdk_get_io_channel(nvme_ctrlr); 7138 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7139 7140 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7141 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7142 7143 /* Disable starts from thread 1. */ 7144 set_thread(1); 7145 7146 /* Case 1: ctrlr is already disabled. */ 7147 nvme_ctrlr->disabled = true; 7148 7149 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7150 CU_ASSERT(rc == -EALREADY); 7151 7152 /* Case 2: ctrlr is already being destructed. */ 7153 nvme_ctrlr->disabled = false; 7154 nvme_ctrlr->destruct = true; 7155 7156 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7157 CU_ASSERT(rc == -ENXIO); 7158 7159 /* Case 3: reset is in progress. */ 7160 nvme_ctrlr->destruct = false; 7161 nvme_ctrlr->resetting = true; 7162 7163 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7164 CU_ASSERT(rc == -EBUSY); 7165 7166 /* Case 4: disable completes successfully. */ 7167 nvme_ctrlr->resetting = false; 7168 7169 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7170 CU_ASSERT(rc == 0); 7171 CU_ASSERT(nvme_ctrlr->resetting == true); 7172 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7173 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7174 7175 poll_thread_times(0, 3); 7176 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7177 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7178 7179 poll_thread_times(0, 1); 7180 poll_thread_times(1, 1); 7181 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7182 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7183 7184 poll_thread_times(1, 1); 7185 poll_thread_times(0, 1); 7186 CU_ASSERT(ctrlr.adminq.is_connected == false); 7187 poll_thread_times(1, 1); 7188 poll_thread_times(0, 1); 7189 poll_thread_times(1, 1); 7190 poll_thread_times(0, 1); 7191 CU_ASSERT(nvme_ctrlr->resetting == false); 7192 CU_ASSERT(nvme_ctrlr->disabled == true); 7193 7194 /* Case 5: enable completes successfully. */ 7195 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7196 CU_ASSERT(rc == 0); 7197 7198 CU_ASSERT(nvme_ctrlr->resetting == true); 7199 CU_ASSERT(nvme_ctrlr->disabled == false); 7200 7201 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7202 poll_thread_times(0, 2); 7203 CU_ASSERT(ctrlr.adminq.is_connected == true); 7204 7205 poll_thread_times(0, 1); 7206 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7207 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7208 7209 poll_thread_times(1, 1); 7210 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7211 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7212 CU_ASSERT(nvme_ctrlr->resetting == true); 7213 7214 poll_thread_times(0, 2); 7215 CU_ASSERT(nvme_ctrlr->resetting == true); 7216 poll_thread_times(1, 1); 7217 CU_ASSERT(nvme_ctrlr->resetting == true); 7218 poll_thread_times(0, 1); 7219 CU_ASSERT(nvme_ctrlr->resetting == false); 7220 7221 /* Case 6: ctrlr is already enabled. */ 7222 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7223 CU_ASSERT(rc == -EALREADY); 7224 7225 set_thread(0); 7226 7227 /* Case 7: disable cancels delayed reconnect. */ 7228 nvme_ctrlr->opts.reconnect_delay_sec = 10; 7229 ctrlr.fail_reset = true; 7230 7231 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7232 CU_ASSERT(rc == 0); 7233 7234 poll_threads(); 7235 7236 CU_ASSERT(nvme_ctrlr->resetting == false); 7237 CU_ASSERT(ctrlr.is_failed == false); 7238 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7239 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7240 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 7241 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 7242 7243 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7244 CU_ASSERT(rc == 0); 7245 7246 CU_ASSERT(nvme_ctrlr->resetting == true); 7247 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 7248 7249 poll_threads(); 7250 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7251 poll_threads(); 7252 7253 CU_ASSERT(nvme_ctrlr->resetting == false); 7254 CU_ASSERT(nvme_ctrlr->disabled == true); 7255 7256 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7257 CU_ASSERT(rc == 0); 7258 7259 CU_ASSERT(nvme_ctrlr->resetting == true); 7260 CU_ASSERT(nvme_ctrlr->disabled == false); 7261 7262 poll_threads(); 7263 7264 CU_ASSERT(nvme_ctrlr->resetting == false); 7265 7266 set_thread(1); 7267 7268 spdk_put_io_channel(ch2); 7269 7270 set_thread(0); 7271 7272 spdk_put_io_channel(ch1); 7273 7274 poll_threads(); 7275 7276 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7277 CU_ASSERT(rc == 0); 7278 7279 poll_threads(); 7280 spdk_delay_us(1000); 7281 poll_threads(); 7282 7283 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7284 } 7285 7286 static void 7287 ut_delete_done(void *ctx, int rc) 7288 { 7289 int *delete_done_rc = ctx; 7290 *delete_done_rc = rc; 7291 } 7292 7293 static void 7294 test_delete_ctrlr_done(void) 7295 { 7296 struct spdk_nvme_transport_id trid = {}; 7297 struct spdk_nvme_ctrlr ctrlr = {}; 7298 int delete_done_rc = 0xDEADBEEF; 7299 int rc; 7300 7301 ut_init_trid(&trid); 7302 7303 nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7304 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 7305 7306 rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc); 7307 CU_ASSERT(rc == 0); 7308 7309 for (int i = 0; i < 20; i++) { 7310 poll_threads(); 7311 if (delete_done_rc == 0) { 7312 break; 7313 } 7314 spdk_delay_us(1000); 7315 } 7316 7317 CU_ASSERT(delete_done_rc == 0); 7318 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7319 } 7320 7321 static void 7322 test_ns_remove_during_reset(void) 7323 { 7324 struct nvme_path_id path = {}; 7325 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 7326 struct spdk_nvme_ctrlr *ctrlr; 7327 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 7328 struct nvme_bdev_ctrlr *nbdev_ctrlr; 7329 struct nvme_ctrlr *nvme_ctrlr; 7330 const int STRING_SIZE = 32; 7331 const char *attached_names[STRING_SIZE]; 7332 struct nvme_bdev *bdev; 7333 struct nvme_ns *nvme_ns; 7334 union spdk_nvme_async_event_completion event = {}; 7335 struct spdk_nvme_cpl cpl = {}; 7336 int rc; 7337 7338 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 7339 ut_init_trid(&path.trid); 7340 7341 set_thread(0); 7342 7343 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 7344 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 7345 7346 g_ut_attach_ctrlr_status = 0; 7347 g_ut_attach_bdev_count = 1; 7348 7349 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 7350 attach_ctrlr_done, NULL, &dopts, &opts, false); 7351 CU_ASSERT(rc == 0); 7352 7353 spdk_delay_us(1000); 7354 poll_threads(); 7355 7356 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7357 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7358 7359 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 7360 CU_ASSERT(nvme_ctrlr != NULL); 7361 7362 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 7363 CU_ASSERT(bdev != NULL); 7364 7365 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 7366 CU_ASSERT(nvme_ns != NULL); 7367 7368 /* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist, 7369 * but nvme_ns->ns should be NULL. 7370 */ 7371 7372 CU_ASSERT(ctrlr->ns[0].is_active == true); 7373 ctrlr->ns[0].is_active = false; 7374 7375 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7376 CU_ASSERT(rc == 0); 7377 7378 poll_threads(); 7379 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7380 poll_threads(); 7381 7382 CU_ASSERT(nvme_ctrlr->resetting == false); 7383 CU_ASSERT(ctrlr->adminq.is_connected == true); 7384 7385 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7386 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7387 CU_ASSERT(nvme_ns->bdev == bdev); 7388 CU_ASSERT(nvme_ns->ns == NULL); 7389 7390 /* Then, async event should fill nvme_ns->ns again. */ 7391 7392 ctrlr->ns[0].is_active = true; 7393 7394 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 7395 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 7396 cpl.cdw0 = event.raw; 7397 7398 aer_cb(nvme_ctrlr, &cpl); 7399 7400 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7401 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7402 CU_ASSERT(nvme_ns->bdev == bdev); 7403 CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]); 7404 7405 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7406 CU_ASSERT(rc == 0); 7407 7408 poll_threads(); 7409 spdk_delay_us(1000); 7410 poll_threads(); 7411 7412 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7413 } 7414 7415 static void 7416 test_io_path_is_current(void) 7417 { 7418 struct nvme_bdev_channel nbdev_ch = { 7419 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 7420 }; 7421 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 7422 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 7423 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 7424 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }, 7425 nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 7426 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {}; 7427 struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 7428 struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 7429 struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, }; 7430 struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, }; 7431 struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, }; 7432 struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, }; 7433 struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 7434 struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 7435 struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 7436 7437 /* io_path1 is deleting */ 7438 io_path1.nbdev_ch = NULL; 7439 7440 CU_ASSERT(nvme_io_path_is_current(&io_path1) == false); 7441 7442 io_path1.nbdev_ch = &nbdev_ch; 7443 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 7444 io_path2.nbdev_ch = &nbdev_ch; 7445 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 7446 io_path3.nbdev_ch = &nbdev_ch; 7447 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 7448 7449 /* active/active: io_path is current if it is available and ANA optimized. */ 7450 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7451 7452 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7453 7454 /* active/active: io_path is not current if it is disconnected even if it is 7455 * ANA optimized. 7456 */ 7457 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7458 7459 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7460 7461 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 7462 7463 /* active/passive: io_path is current if it is available and cached. 7464 * (only ANA optimized path is cached for active/passive.) 7465 */ 7466 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7467 nbdev_ch.current_io_path = &io_path2; 7468 7469 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7470 7471 /* active:passive: io_path is not current if it is disconnected even if it is cached */ 7472 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7473 7474 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7475 7476 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 7477 7478 /* active/active and active/passive: io_path is not current if it is ANA inaccessible. */ 7479 nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 7480 7481 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7482 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7483 7484 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7485 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7486 7487 /* active/active: non-optimized path is current only if there is no optimized path. */ 7488 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7489 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7490 7491 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7492 7493 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7494 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7495 7496 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7497 7498 /* active/passive: current is true if it is the first one when there is no optimized path. */ 7499 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7500 nbdev_ch.current_io_path = NULL; 7501 7502 CU_ASSERT(nvme_io_path_is_current(&io_path1) == true); 7503 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7504 CU_ASSERT(nvme_io_path_is_current(&io_path3) == false); 7505 } 7506 7507 int 7508 main(int argc, char **argv) 7509 { 7510 CU_pSuite suite = NULL; 7511 unsigned int num_failures; 7512 7513 CU_initialize_registry(); 7514 7515 suite = CU_add_suite("nvme", NULL, NULL); 7516 7517 CU_ADD_TEST(suite, test_create_ctrlr); 7518 CU_ADD_TEST(suite, test_reset_ctrlr); 7519 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 7520 CU_ADD_TEST(suite, test_failover_ctrlr); 7521 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 7522 CU_ADD_TEST(suite, test_pending_reset); 7523 CU_ADD_TEST(suite, test_attach_ctrlr); 7524 CU_ADD_TEST(suite, test_aer_cb); 7525 CU_ADD_TEST(suite, test_submit_nvme_cmd); 7526 CU_ADD_TEST(suite, test_add_remove_trid); 7527 CU_ADD_TEST(suite, test_abort); 7528 CU_ADD_TEST(suite, test_get_io_qpair); 7529 CU_ADD_TEST(suite, test_bdev_unregister); 7530 CU_ADD_TEST(suite, test_compare_ns); 7531 CU_ADD_TEST(suite, test_init_ana_log_page); 7532 CU_ADD_TEST(suite, test_get_memory_domains); 7533 CU_ADD_TEST(suite, test_reconnect_qpair); 7534 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 7535 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 7536 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 7537 CU_ADD_TEST(suite, test_admin_path); 7538 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 7539 CU_ADD_TEST(suite, test_find_io_path); 7540 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 7541 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 7542 CU_ADD_TEST(suite, test_retry_io_count); 7543 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 7544 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 7545 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 7546 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 7547 CU_ADD_TEST(suite, test_reconnect_ctrlr); 7548 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 7549 CU_ADD_TEST(suite, test_fail_path); 7550 CU_ADD_TEST(suite, test_nvme_ns_cmp); 7551 CU_ADD_TEST(suite, test_ana_transition); 7552 CU_ADD_TEST(suite, test_set_preferred_path); 7553 CU_ADD_TEST(suite, test_find_next_io_path); 7554 CU_ADD_TEST(suite, test_find_io_path_min_qd); 7555 CU_ADD_TEST(suite, test_disable_auto_failback); 7556 CU_ADD_TEST(suite, test_set_multipath_policy); 7557 CU_ADD_TEST(suite, test_uuid_generation); 7558 CU_ADD_TEST(suite, test_retry_io_to_same_path); 7559 CU_ADD_TEST(suite, test_race_between_reset_and_disconnected); 7560 CU_ADD_TEST(suite, test_ctrlr_op_rpc); 7561 CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc); 7562 CU_ADD_TEST(suite, test_disable_enable_ctrlr); 7563 CU_ADD_TEST(suite, test_delete_ctrlr_done); 7564 CU_ADD_TEST(suite, test_ns_remove_during_reset); 7565 CU_ADD_TEST(suite, test_io_path_is_current); 7566 7567 allocate_threads(3); 7568 set_thread(0); 7569 bdev_nvme_library_init(); 7570 init_accel(); 7571 7572 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7573 7574 set_thread(0); 7575 bdev_nvme_library_fini(); 7576 fini_accel(); 7577 free_threads(); 7578 7579 CU_cleanup_registry(); 7580 7581 return num_failures; 7582 } 7583