/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct
		spdk_nvme_transport_id *trid), 0);

int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

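/* Background note on the stubbing convention (spdk_internal/mock.h): DEFINE_STUB(fn,
 * ret, args, val) defines fn so that it returns val by default, and DEFINE_STUB_V is
 * the void variant. A test can override a stubbed return value at runtime, roughly:
 *
 *	MOCK_SET(spdk_nvme_ctrlr_get_flags, SPDK_NVME_CTRLR_SGL_SUPPORTED);
 *	...exercise the code under test...
 *	MOCK_CLEAR(spdk_nvme_ctrlr_get_flags);
 *
 * (illustrative values only). spdk_nvme_ctrlr_get_memory_domains above instead uses
 * DEFINE_RETURN_MOCK plus HANDLE_RETURN_MOCK so that it can keep a hand-written body.
 */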
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

struct ut_nvme_req {
	uint16_t opc;
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	struct spdk_nvme_cpl cpl;
	TAILQ_ENTRY(ut_nvme_req) tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t id;
	bool is_active;
	struct spdk_uuid *uuid;
	enum spdk_nvme_ana_state ana_state;
	enum spdk_nvme_csi csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr *ctrlr;
	uint8_t failure_reason;
	bool is_connected;
	bool in_completion_context;
	bool delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req) outstanding_reqs;
	uint32_t num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq;
	struct spdk_nvme_poll_group *poll_group;
	void *poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t num_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_qpair adminq;
	struct spdk_nvme_ctrlr_data cdata;
	bool attached;
	bool is_failed;
	bool fail_reset;
	bool is_removed;
	struct spdk_nvme_transport_id trid;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
	struct spdk_nvme_ctrlr_opts opts;
};

struct spdk_nvme_poll_group {
	void *ctx;
	struct spdk_nvme_accel_fn_table accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;
	void *cb_ctx;
	spdk_nvme_attach_cb attach_cb;
	struct spdk_nvme_ctrlr *init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs =
	TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches.
			 */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

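	/* A successfully probed ctrlr moves from the init list to the attached
	 * list and is reported through attach_cb, mimicking how a real async
	 * probe completes.
	 */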
	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
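	/* Mirror the driver's poll-group bookkeeping: a connecting qpair moves
	 * from the group's disconnected list to its connected list, and
	 * poll_group_tailq_head always records which list the qpair is on.
	 */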
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) + \
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
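	/* Lay the page out the way the driver parses it: the fixed-size header
	 * first, then one descriptor per active namespace. Each descriptor here
	 * carries exactly one NSID, hence the sizeof(uint32_t) in UT_ANA_DESC_SIZE.
	 */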
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) {
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0,
		error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
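	/* The UT bdev layer completes I/O inline: record the final status and
	 * clear the in-flight flag that the tests below inspect.
	 */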
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress.
	 */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1.
	 */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, two channels are still open, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully.
	 */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * A nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was broken, and resetting the ctrlr failed repeatedly before failover from
 * trid1 to trid2 started. While the failed reset was being processed, trid3 was
 * added. trid1 should have remained the active path, i.e. the head of the list,
 * until the failover completed. However, trid3 was inserted at the head of the
 * list by mistake.
 *
 * I/O qpairs are polled more frequently than the admin qpair. When a connection is
 * broken, an I/O qpair may therefore detect the error earlier than the admin qpair.
 * An I/O qpair error triggers reset ctrlr, while an admin qpair error triggers
 * failover ctrlr. Hence reset ctrlr may run repeatedly before failover runs, so
 * this race can actually happen.
 *
 * The following test verifies the fix.
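 *
 * In short: while a reset is still retrying, the active path must remain at the
 * head of nvme_ctrlr->trids, and bdev_nvme_add_secondary_trid() must append new
 * trids behind it rather than insert them at the head.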
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
struct nvme_io_path *io_path1, *io_path2; 1910 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1911 int rc; 1912 1913 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1914 ut_init_trid(&trid); 1915 1916 set_thread(0); 1917 1918 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1919 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1920 1921 g_ut_attach_ctrlr_status = 0; 1922 g_ut_attach_bdev_count = 1; 1923 1924 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1925 attach_ctrlr_done, NULL, &opts, NULL, false); 1926 CU_ASSERT(rc == 0); 1927 1928 spdk_delay_us(1000); 1929 poll_threads(); 1930 1931 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1932 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1933 1934 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1935 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1936 1937 ch1 = spdk_get_io_channel(bdev); 1938 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1939 1940 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1941 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1942 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1943 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1944 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1945 1946 set_thread(1); 1947 1948 ch2 = spdk_get_io_channel(bdev); 1949 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1950 1951 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1952 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1953 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1954 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1955 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1956 1957 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1958 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1959 1960 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1961 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1962 1963 /* The first reset request is submitted on thread 1, and the second reset request 1964 * is submitted on thread 0 while processing the first request. 1965 */ 1966 bdev_nvme_submit_request(ch2, first_bdev_io); 1967 CU_ASSERT(nvme_ctrlr->resetting == true); 1968 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1969 1970 set_thread(0); 1971 1972 bdev_nvme_submit_request(ch1, second_bdev_io); 1973 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1974 1975 poll_threads(); 1976 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1977 poll_threads(); 1978 1979 CU_ASSERT(nvme_ctrlr->resetting == false); 1980 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1981 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1982 1983 /* The first reset request is submitted on thread 1, and the second reset request 1984 * is submitted on thread 0 while processing the first request. 1985 * 1986 * The difference from the above scenario is that the controller is removed while 1987 * processing the first request. Hence both reset requests should fail. 
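	 *
	 * In both scenarios, only one reset can run per nvme_ctrlr at a time. A reset
	 * submitted while another is in progress is parked on the submitting channel's
	 * pending_resets list and completes with the outcome of the running reset.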
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
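	 *
	 * The nvme_bdev is named after the controller name plus the namespace ID,
	 * hence "nvme0n1" as asserted below.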
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_ut_register_bdev_status = 0;
}

static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
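	 *
	 * The 1st namespace is deactivated below before attaching, so only three
	 * bdevs are expected (g_ut_attach_bdev_count = 3).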
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	/* Change ANA state of active namespaces.
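	 *
	 * An ANA change notice makes the driver re-read the ANA log page over the
	 * admin queue, so the new states become visible only after the admin queue
	 * is polled below.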
	 */
	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);
	qpair = io_path->qpair->qpair;
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);
	qpair = io_path->qpair->qpair;
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);
	qpair = io_path->qpair->qpair;
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	/* Only compare and write now.
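	 *
	 * A single COMPARE_AND_WRITE bdev I/O maps to an NVMe fused command pair,
	 * roughly as follows (a sketch, not the exact call sites in bdev_nvme.c):
	 *
	 *	spdk_nvme_ns_cmd_compare(ns, qpair, buf, lba, cnt, cb, ctx,
	 *				 SPDK_NVME_IO_FLAGS_FUSE_FIRST);
	 *	spdk_nvme_ns_cmd_write(ns, qpair, buf, lba, cnt, cb, ctx,
	 *			       SPDK_NVME_IO_FLAGS_FUSE_SECOND);
	 *
	 * which is why two outstanding requests are expected below.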
	 */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation. */
	req = TAILQ_FIRST(&qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}

static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch);

	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	/* Verify that ext NVME API is called when data is described by memory domain */
	g_ut_read_ext_called = false;
	bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef;
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	CU_ASSERT(g_ut_read_ext_called == true);
	g_ut_read_ext_called = false;
	bdev_io->u.bdev.memory_domain = NULL;

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_add_remove_trid(void)
{
	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_path_id *ctrid;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	ut_init_trid3(&path3.trid);

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed.
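	 *
	 * Removing a trid that is not the active path simply unlinks the entry;
	 * no reset or failover is triggered (contrast this with the removal of
	 * the active path1 further below, which does start a reset).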
	 */
	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
	}

	ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully.
	 * If we add path2 again, path2 should be inserted between path1 and path3.
	 * Then, we remove path2. It is not used, and simply removed.
	 */
	ctrid->last_failed_tsc = spdk_get_ticks() + 1;

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);

	ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);

	ctrid = TAILQ_NEXT(ctrid, link);
	SPDK_CU_ASSERT_FATAL(ctrid != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);

	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
	}

	/* path1 is currently used and path3 is an alternative path.
	 * If we remove path1, the current path is changed to path3.
	 */
	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* path3 is the current and only path. If we remove path3, the corresponding
	 * nvme_ctrlr is removed.
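	 *
	 * The teardown is asynchronous; the name stays resolvable until the
	 * deferred destruct runs in the poll_threads() calls below.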
	 */
	rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
	TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
			break;
		}
	}
	CU_ASSERT(ctrid != NULL);

	/* If trid is not specified, nvme_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_abort(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1;
	struct nvme_io_path *io_path1;
	struct nvme_qpair *nvme_qpair1;
	int rc;

	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
	 * thread 0. The aborts for the I/O requests are submitted on thread 0, and the
	 * aborts for the admin requests are submitted on thread 1. Both should succeed.
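	 *
	 * Note that an abort of an I/O command is itself issued through the admin
	 * queue (NVMe Abort is an admin command). That is why the assertions below
	 * check ctrlr->adminq.num_outstanding_reqs and why an admin queue poll
	 * period has to elapse before an abort completes.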
	 */

	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(write_io);

	fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(fuse_io);

	admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL);
	admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	nvme_qpair1 = io_path1->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	/* Aborting the already completed request should fail. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);

	/* Aborting the fuse request should succeed. */
	fuse_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, fuse_io);

	CU_ASSERT(fuse_io->internal.in_submit_request == true);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2);

	abort_io->u.abort.bio_to_abort = fuse_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(fuse_io->internal.in_submit_request == false);
	CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	set_thread(0);

	/* If qpair is disconnected, it is freed and then reconnected via resetting
	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
	 * while resetting the nvme_ctrlr.
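	 *
	 * A request sitting on the retry list has not been handed to any qpair yet,
	 * so aborting it completes on the spot, without issuing an admin Abort
	 * command.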
	 */
	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	poll_thread_times(0, 3);

	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	write_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));

	/* Aborting the queued write request should succeed immediately. */
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	free(write_io);
	free(fuse_io);
	free(admin_io);
	free(abort_io);

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_get_io_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	struct spdk_nvme_qpair *qpair;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	ctrlr_ch = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);

	qpair = bdev_nvme_get_io_qpair(ch);
	CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);

	spdk_put_io_channel(ch);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* Test a scenario where the bdev subsystem starts shutting down while NVMe bdevs
 * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a test
 * case to avoid regression for this scenario. spdk_bdev_unregister() calls
 * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here.
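 *
 * Destroying the nvme_bdevs first and the nvme_ctrlr afterwards mirrors the
 * teardown order the bdev layer uses during shutdown.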
 */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 2, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	nvme_ctrlr->destruct = true;
	_nvme_ctrlr_destruct(nvme_ctrlr);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_compare_ns(void)
{
	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };
	struct spdk_uuid uuid1 = { .u.raw = { 0xAA } };
	struct spdk_uuid uuid2 = { .u.raw = { 0xAB } };

	/* No IDs are defined. */
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only EUI64 are defined and not matched. */
	nsdata1.eui64 = 0xABCDEF0123456789;
	nsdata2.eui64 = 0xBBCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only EUI64 are defined and matched. */
	nsdata2.eui64 = 0xABCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only NGUID are defined and not matched. */
	nsdata1.eui64 = 0x0;
	nsdata2.eui64 = 0x0;
	nsdata1.nguid[0] = 0x12;
	nsdata2.nguid[0] = 0x10;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only NGUID are defined and matched. */
	nsdata2.nguid[0] = 0x12;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only UUID are defined and not matched. */
	nsdata1.nguid[0] = 0x0;
	nsdata2.nguid[0] = 0x0;
	ns1.uuid = &uuid1;
	ns2.uuid = &uuid2;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only one UUID is defined. */
	ns1.uuid = NULL;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only UUID are defined and matched. */
	ns1.uuid = &uuid2;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* All EUI64, NGUID, and UUID are defined and matched.
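	 *
	 * When several identifiers are present, all of them must match. Even fully
	 * matching IDs are not enough if the namespaces use different command sets,
	 * as the CSI check below shows.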
	 */
	nsdata1.eui64 = 0x123456789ABCDEF;
	nsdata2.eui64 = 0x123456789ABCDEF;
	nsdata1.nguid[15] = 0x34;
	nsdata2.nguid[15] = 0x34;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* CSI are not matched. */
	ns1.csi = SPDK_NVME_CSI_ZNS;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);
}

static void
test_init_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 5, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 5;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_get_memory_domains(void)
{
	struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef };
	struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef };
	struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 };
	struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
	struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
	struct spdk_memory_domain *domains[4] = {};
	int rc = 0;

	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);

	/* nvme controller doesn't have memory domains */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(domains[0] == NULL);
	CU_ASSERT(domains[1] == NULL);

	/* nvme controller has a memory domain */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
	CU_ASSERT(rc == 1);
	CU_ASSERT(domains[0] != NULL);
	memset(domains, 0, sizeof(domains));

	/* multipath, 2 controllers report 1 memory domain each */
	TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);

	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
	CU_ASSERT(rc == 2);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] != NULL);
	memset(domains, 0, sizeof(domains));

	/* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
	rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
	CU_ASSERT(rc == 2);

	/* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
	CU_ASSERT(rc == 2);
	CU_ASSERT(domains[0] == NULL);
	CU_ASSERT(domains[1] == NULL);

	/* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
	CU_ASSERT(rc == 2);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] == NULL);
	memset(domains, 0, sizeof(domains));

	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
	CU_ASSERT(rc == 4);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] != NULL);
	CU_ASSERT(domains[2] != NULL);
	CU_ASSERT(domains[3] != NULL);
	memset(domains, 0, sizeof(domains));

	/* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
	 * Array size is less than the number of memory domains */
	MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
	rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
	CU_ASSERT(rc == 4);
	CU_ASSERT(domains[0] != NULL);
	CU_ASSERT(domains[1] != NULL);
	CU_ASSERT(domains[2] != NULL);
	CU_ASSERT(domains[3] == NULL);
	memset(domains, 0, sizeof(domains));

	MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
}

static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	nvme_qpair1 = io_path1->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	nvme_qpair2 = io_path2->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);

	/* If a qpair is disconnected, it is freed and then reconnected via
	 * resetting the corresponding nvme_ctrlr.
	 */
	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;

	poll_thread_times(1, 3);
	CU_ASSERT(nvme_qpair1->qpair != NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 3);
	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(ctrlr->is_failed == true);

	poll_thread_times(1, 2);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr->is_failed == false);
	CU_ASSERT(ctrlr->adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr->adminq.is_connected == true);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_qpair1->qpair != NULL);
	CU_ASSERT(nvme_qpair2->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 2);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	poll_threads();

	/* If a qpair is disconnected and resetting the corresponding nvme_ctrlr
	 * fails, the qpair is just freed.
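	 *
	 * The poll_thread_times() sequence above walks the reset state machine step
	 * by step: the I/O qpairs are deleted first, then the admin qpair is
	 * disconnected and reconnected, and finally the I/O qpairs are recreated.
	 * The failure case below differs only in that the reconnect never happens.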
	 */
	nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;
	ctrlr->fail_reset = true;

	poll_thread_times(1, 3);
	CU_ASSERT(nvme_qpair1->qpair != NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 3);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);
	CU_ASSERT(ctrlr->is_failed == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 3);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr->is_failed == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_qpair2->qpair == NULL);

	poll_threads();

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_create_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);

	/* cntlid is duplicated, and adding the second ctrlr should fail. */
	g_ut_attach_ctrlr_status = -EINVAL;

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);

	/* cntlid is not duplicated, and adding the third ctrlr should succeed.
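	 *
	 * CNTLIDs are unique within an NVM subsystem, so a duplicated CNTLID
	 * indicates that the new trid reached a controller that is already
	 * attached, and the attach is rejected.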
	 */
	g_ut_attach_ctrlr_status = 0;

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	/* Delete two ctrlrs at once. */
	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* Add two ctrlrs and delete one by one. */
	ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_ns *
_nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_ns *nvme_ns;

	TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) {
		if (nvme_ns->ctrlr == nvme_ctrlr) {
			return nvme_ns;
		}
	}

	return NULL;
}

static void
test_add_multi_ns_to_bdev(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	struct spdk_uuid uuid2 = { .u.raw = { 0x2 } };
	struct spdk_uuid uuid3 = { .u.raw = { 0x3 } };
	struct spdk_uuid uuid4 = { .u.raw = { 0x4 } };
	struct spdk_uuid uuid44 = { .u.raw = { 0x44 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	/* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */

	/* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[1].is_active = false;
	ctrlr1->ns[4].is_active = false;
	ctrlr1->ns[0].uuid = &uuid1;
	ctrlr1->ns[2].uuid = &uuid3;
	ctrlr1->ns[3].uuid = &uuid4;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th
	 * namespaces are populated. The uuid of the 4th namespace is different, and hence
	 * adding the 4th namespace to a bdev should fail.
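	 *
	 * Namespace identity is decided by bdev_nvme_compare_ns(), exercised in
	 * test_compare_ns() above: the same NSID with a different UUID is a
	 * different namespace and must not be aggregated into one nvme_bdev.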
 */
	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[2].is_active = false;
	ctrlr2->ns[4].is_active = false;
	ctrlr2->ns[0].uuid = &uuid1;
	ctrlr2->ns[1].uuid = &uuid2;
	ctrlr2->ns[3].uuid = &uuid44;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);

	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);

	CU_ASSERT(bdev1->ref == 2);
	CU_ASSERT(bdev2->ref == 1);
	CU_ASSERT(bdev3->ref == 1);
	CU_ASSERT(bdev4->ref == 1);

	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);

	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
	 * can be deleted when the bdev subsystem shuts down.
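	 * (Shutdown destructs the nvme_bdev before the nvme_ctrlrs, which is the
	 * reverse of the bdev_nvme_delete() order exercised above.)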
3670 */ 3671 g_ut_attach_bdev_count = 1; 3672 3673 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3674 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3675 3676 ctrlr1->ns[0].uuid = &uuid1; 3677 3678 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3679 attach_ctrlr_done, NULL, &opts, NULL, true); 3680 CU_ASSERT(rc == 0); 3681 3682 spdk_delay_us(1000); 3683 poll_threads(); 3684 3685 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3686 poll_threads(); 3687 3688 ut_init_trid2(&path2.trid); 3689 3690 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3691 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3692 3693 ctrlr2->ns[0].uuid = &uuid1; 3694 3695 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3696 attach_ctrlr_done, NULL, &opts, NULL, true); 3697 CU_ASSERT(rc == 0); 3698 3699 spdk_delay_us(1000); 3700 poll_threads(); 3701 3702 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3703 poll_threads(); 3704 3705 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3706 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3707 3708 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3709 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3710 3711 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3712 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3713 3714 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3715 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3716 3717 /* Check if a nvme_bdev has two nvme_ns. */ 3718 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3719 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3720 CU_ASSERT(nvme_ns1->bdev == bdev1); 3721 3722 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3723 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3724 CU_ASSERT(nvme_ns2->bdev == bdev1); 3725 3726 /* Delete nvme_bdev first when the bdev subsystem shutdown. 
*/ 3727 bdev_nvme_destruct(&bdev1->disk); 3728 3729 poll_threads(); 3730 3731 CU_ASSERT(nvme_ns1->bdev == NULL); 3732 CU_ASSERT(nvme_ns2->bdev == NULL); 3733 3734 nvme_ctrlr1->destruct = true; 3735 _nvme_ctrlr_destruct(nvme_ctrlr1); 3736 3737 poll_threads(); 3738 spdk_delay_us(1000); 3739 poll_threads(); 3740 3741 nvme_ctrlr2->destruct = true; 3742 _nvme_ctrlr_destruct(nvme_ctrlr2); 3743 3744 poll_threads(); 3745 spdk_delay_us(1000); 3746 poll_threads(); 3747 3748 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3749 } 3750 3751 static void 3752 test_add_multi_io_paths_to_nbdev_ch(void) 3753 { 3754 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3755 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3756 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3757 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3758 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3759 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3760 const int STRING_SIZE = 32; 3761 const char *attached_names[STRING_SIZE]; 3762 struct nvme_bdev *bdev; 3763 struct spdk_io_channel *ch; 3764 struct nvme_bdev_channel *nbdev_ch; 3765 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3766 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3767 int rc; 3768 3769 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3770 ut_init_trid(&path1.trid); 3771 ut_init_trid2(&path2.trid); 3772 ut_init_trid3(&path3.trid); 3773 g_ut_attach_ctrlr_status = 0; 3774 g_ut_attach_bdev_count = 1; 3775 3776 set_thread(1); 3777 3778 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3779 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3780 3781 ctrlr1->ns[0].uuid = &uuid1; 3782 3783 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3784 attach_ctrlr_done, NULL, &opts, NULL, true); 3785 CU_ASSERT(rc == 0); 3786 3787 spdk_delay_us(1000); 3788 poll_threads(); 3789 3790 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3791 poll_threads(); 3792 3793 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3794 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3795 3796 ctrlr2->ns[0].uuid = &uuid1; 3797 3798 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3799 attach_ctrlr_done, NULL, &opts, NULL, true); 3800 CU_ASSERT(rc == 0); 3801 3802 spdk_delay_us(1000); 3803 poll_threads(); 3804 3805 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3806 poll_threads(); 3807 3808 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3809 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3810 3811 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3812 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3813 3814 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3815 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3816 3817 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3818 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3819 3820 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3821 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3822 3823 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3824 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3825 3826 set_thread(0); 3827 3828 ch = spdk_get_io_channel(bdev); 3829 SPDK_CU_ASSERT_FATAL(ch != NULL); 3830 nbdev_ch = spdk_io_channel_get_ctx(ch); 3831 3832 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3833 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3834 3835 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3836 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3837 3838 set_thread(1); 3839 3840 /* Check if I/O path is dynamically added to 
nvme_bdev_channel. */ 3841 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3842 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3843 3844 ctrlr3->ns[0].uuid = &uuid1; 3845 3846 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3847 attach_ctrlr_done, NULL, &opts, NULL, true); 3848 CU_ASSERT(rc == 0); 3849 3850 spdk_delay_us(1000); 3851 poll_threads(); 3852 3853 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3854 poll_threads(); 3855 3856 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn); 3857 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3858 3859 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3860 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3861 3862 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3863 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3864 3865 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3866 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3867 CU_ASSERT(rc == 0); 3868 3869 poll_threads(); 3870 spdk_delay_us(1000); 3871 poll_threads(); 3872 3873 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1); 3874 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3875 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3); 3876 3877 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3878 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3879 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3880 3881 set_thread(0); 3882 3883 spdk_put_io_channel(ch); 3884 3885 poll_threads(); 3886 3887 set_thread(1); 3888 3889 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3890 CU_ASSERT(rc == 0); 3891 3892 poll_threads(); 3893 spdk_delay_us(1000); 3894 poll_threads(); 3895 3896 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3897 } 3898 3899 static void 3900 test_admin_path(void) 3901 { 3902 struct nvme_path_id path1 = {}, path2 = {}; 3903 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3904 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3905 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3906 const int STRING_SIZE = 32; 3907 const char *attached_names[STRING_SIZE]; 3908 struct nvme_bdev *bdev; 3909 struct spdk_io_channel *ch; 3910 struct spdk_bdev_io *bdev_io; 3911 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3912 int rc; 3913 3914 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3915 ut_init_trid(&path1.trid); 3916 ut_init_trid2(&path2.trid); 3917 g_ut_attach_ctrlr_status = 0; 3918 g_ut_attach_bdev_count = 1; 3919 3920 set_thread(0); 3921 3922 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3923 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3924 3925 ctrlr1->ns[0].uuid = &uuid1; 3926 3927 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3928 attach_ctrlr_done, NULL, &opts, NULL, true); 3929 CU_ASSERT(rc == 0); 3930 3931 spdk_delay_us(1000); 3932 poll_threads(); 3933 3934 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3935 poll_threads(); 3936 3937 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3938 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3939 3940 ctrlr2->ns[0].uuid = &uuid1; 3941 3942 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3943 attach_ctrlr_done, NULL, &opts, NULL, true); 3944 CU_ASSERT(rc == 0); 3945 3946 spdk_delay_us(1000); 3947 poll_threads(); 3948 3949 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3950 poll_threads(); 3951 3952 nbdev_ctrlr = 
nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	/* ctrlr1 is failed but ctrlr2 is not. The admin command should be
	 * submitted to ctrlr2.
	 */
	ctrlr1->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Both ctrlr1 and ctrlr2 are failed, so submission of the admin command
	 * should fail.
	 */
	ctrlr2->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_io_path *
ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
			struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_io_path *io_path;

	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
		if (io_path->qpair->ctrlr == nvme_ctrlr) {
			return io_path;
		}
	}

	return NULL;
}

static void
test_reset_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_path_id *curr_path1, *curr_path2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct nvme_bdev_io *first_bio;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
poll_threads(); 4061 4062 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4063 poll_threads(); 4064 4065 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4066 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4067 4068 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4069 attach_ctrlr_done, NULL, &opts, NULL, true); 4070 CU_ASSERT(rc == 0); 4071 4072 spdk_delay_us(1000); 4073 poll_threads(); 4074 4075 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4076 poll_threads(); 4077 4078 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4079 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4080 4081 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4082 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4083 4084 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4085 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4086 4087 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4088 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4089 4090 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4091 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4092 4093 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4094 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4095 4096 set_thread(0); 4097 4098 ch1 = spdk_get_io_channel(bdev); 4099 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4100 4101 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4102 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4103 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4104 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4105 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4106 4107 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4108 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4109 4110 set_thread(1); 4111 4112 ch2 = spdk_get_io_channel(bdev); 4113 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4114 4115 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4116 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4117 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4118 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4119 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4120 4121 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4122 4123 /* The first reset request from bdev_io is submitted on thread 0. 4124 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4125 * 4126 * A few extra polls are necessary after resetting ctrlr1 to check 4127 * pending reset requests for ctrlr1. 
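	 * (Each poll_thread_times() call below advances the reset state machine
	 * one step: the qpairs are disconnected on each thread, the adminq is
	 * reconnected, the qpairs are re-created, and finally the reset of
	 * ctrlr1 completes and the same sequence runs for ctrlr2.)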
4128 */ 4129 ctrlr1->is_failed = true; 4130 curr_path1->last_failed_tsc = spdk_get_ticks(); 4131 ctrlr2->is_failed = true; 4132 curr_path2->last_failed_tsc = spdk_get_ticks(); 4133 4134 set_thread(0); 4135 4136 bdev_nvme_submit_request(ch1, first_bdev_io); 4137 CU_ASSERT(first_bio->io_path == io_path11); 4138 CU_ASSERT(nvme_ctrlr1->resetting == true); 4139 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4140 4141 poll_thread_times(0, 3); 4142 CU_ASSERT(io_path11->qpair->qpair == NULL); 4143 CU_ASSERT(io_path21->qpair->qpair != NULL); 4144 4145 poll_thread_times(1, 2); 4146 CU_ASSERT(io_path11->qpair->qpair == NULL); 4147 CU_ASSERT(io_path21->qpair->qpair == NULL); 4148 CU_ASSERT(ctrlr1->is_failed == true); 4149 4150 poll_thread_times(0, 1); 4151 CU_ASSERT(nvme_ctrlr1->resetting == true); 4152 CU_ASSERT(ctrlr1->is_failed == false); 4153 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4154 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4155 4156 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4157 poll_thread_times(0, 2); 4158 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4159 4160 poll_thread_times(0, 1); 4161 CU_ASSERT(io_path11->qpair->qpair != NULL); 4162 CU_ASSERT(io_path21->qpair->qpair == NULL); 4163 4164 poll_thread_times(1, 1); 4165 CU_ASSERT(io_path11->qpair->qpair != NULL); 4166 CU_ASSERT(io_path21->qpair->qpair != NULL); 4167 4168 poll_thread_times(0, 2); 4169 CU_ASSERT(nvme_ctrlr1->resetting == true); 4170 poll_thread_times(1, 1); 4171 CU_ASSERT(nvme_ctrlr1->resetting == true); 4172 poll_thread_times(0, 2); 4173 CU_ASSERT(nvme_ctrlr1->resetting == false); 4174 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4175 CU_ASSERT(first_bio->io_path == io_path12); 4176 CU_ASSERT(nvme_ctrlr2->resetting == true); 4177 4178 poll_thread_times(0, 3); 4179 CU_ASSERT(io_path12->qpair->qpair == NULL); 4180 CU_ASSERT(io_path22->qpair->qpair != NULL); 4181 4182 poll_thread_times(1, 2); 4183 CU_ASSERT(io_path12->qpair->qpair == NULL); 4184 CU_ASSERT(io_path22->qpair->qpair == NULL); 4185 CU_ASSERT(ctrlr2->is_failed == true); 4186 4187 poll_thread_times(0, 1); 4188 CU_ASSERT(nvme_ctrlr2->resetting == true); 4189 CU_ASSERT(ctrlr2->is_failed == false); 4190 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4191 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4192 4193 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4194 poll_thread_times(0, 2); 4195 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4196 4197 poll_thread_times(0, 1); 4198 CU_ASSERT(io_path12->qpair->qpair != NULL); 4199 CU_ASSERT(io_path22->qpair->qpair == NULL); 4200 4201 poll_thread_times(1, 2); 4202 CU_ASSERT(io_path12->qpair->qpair != NULL); 4203 CU_ASSERT(io_path22->qpair->qpair != NULL); 4204 4205 poll_thread_times(0, 2); 4206 CU_ASSERT(nvme_ctrlr2->resetting == true); 4207 poll_thread_times(1, 1); 4208 CU_ASSERT(nvme_ctrlr2->resetting == true); 4209 poll_thread_times(0, 2); 4210 CU_ASSERT(first_bio->io_path == NULL); 4211 CU_ASSERT(nvme_ctrlr2->resetting == false); 4212 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4213 4214 poll_threads(); 4215 4216 /* There is a race between two reset requests from bdev_io. 4217 * 4218 * The first reset request is submitted on thread 0, and the second reset 4219 * request is submitted on thread 1 while the first is resetting ctrlr1. 4220 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4221 * both reset requests go to ctrlr2. The first comes earlier than the second. 4222 * The second is pending on ctrlr2 again. 
After the first completes resetting
	 * ctrlr2, both complete successfully.
	 */
	ctrlr1->is_failed = true;
	curr_path1->last_failed_tsc = spdk_get_ticks();
	ctrlr2->is_failed = true;
	curr_path2->last_failed_tsc = spdk_get_ticks();
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);

	set_thread(1);

	bdev_nvme_submit_request(ch2, second_bdev_io);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(curr_path1->last_failed_tsc == 0);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(curr_path2->last_failed_tsc == 0);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_find_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);

	/* Test if an io_path whose ANA state is not accessible is excluded.
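	 * (INACCESSIBLE, PERSISTENT_LOSS, and CHANGE are all treated as
	 * non-accessible states; only OPTIMIZED and NON_OPTIMIZED paths are
	 * eligible for I/O, as the assertions below verify.)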
*/ 4303 4304 nvme_qpair1.qpair = &qpair1; 4305 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4306 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4307 4308 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4309 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4310 4311 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4312 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4313 4314 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4315 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4316 4317 nbdev_ch.current_io_path = NULL; 4318 4319 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4320 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4321 4322 nbdev_ch.current_io_path = NULL; 4323 4324 /* Test if io_path whose qpair is resetting is excluded. */ 4325 4326 nvme_qpair1.qpair = NULL; 4327 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4328 4329 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4330 4331 /* Test if ANA optimized state or the first found ANA non-optimized state 4332 * is prioritized. 4333 */ 4334 4335 nvme_qpair1.qpair = &qpair1; 4336 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4337 nvme_qpair2.qpair = &qpair2; 4338 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4339 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4340 4341 nbdev_ch.current_io_path = NULL; 4342 4343 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4344 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4345 4346 nbdev_ch.current_io_path = NULL; 4347 } 4348 4349 static void 4350 test_retry_io_if_ana_state_is_updating(void) 4351 { 4352 struct nvme_path_id path = {}; 4353 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 4354 struct spdk_nvme_ctrlr *ctrlr; 4355 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 4356 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4357 struct nvme_ctrlr *nvme_ctrlr; 4358 const int STRING_SIZE = 32; 4359 const char *attached_names[STRING_SIZE]; 4360 struct nvme_bdev *bdev; 4361 struct nvme_ns *nvme_ns; 4362 struct spdk_bdev_io *bdev_io1; 4363 struct spdk_io_channel *ch; 4364 struct nvme_bdev_channel *nbdev_ch; 4365 struct nvme_io_path *io_path; 4366 struct nvme_qpair *nvme_qpair; 4367 int rc; 4368 4369 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4370 ut_init_trid(&path.trid); 4371 4372 set_thread(0); 4373 4374 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4375 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4376 4377 g_ut_attach_ctrlr_status = 0; 4378 g_ut_attach_bdev_count = 1; 4379 4380 opts.ctrlr_loss_timeout_sec = -1; 4381 opts.reconnect_delay_sec = 1; 4382 4383 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4384 attach_ctrlr_done, NULL, &dopts, &opts, false); 4385 CU_ASSERT(rc == 0); 4386 4387 spdk_delay_us(1000); 4388 poll_threads(); 4389 4390 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4391 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4392 4393 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 4394 CU_ASSERT(nvme_ctrlr != NULL); 4395 4396 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4397 CU_ASSERT(bdev != NULL); 4398 4399 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4400 CU_ASSERT(nvme_ns != NULL); 4401 4402 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4403 ut_bdev_io_set_buf(bdev_io1); 4404 4405 ch = spdk_get_io_channel(bdev); 4406 SPDK_CU_ASSERT_FATAL(ch != NULL); 4407 4408 nbdev_ch = spdk_io_channel_get_ctx(ch); 4409 4410 io_path = 
ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the ANA state of the namespace is inaccessible, I/O should be queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nbdev_ch->current_io_path = NULL;

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	/* The ANA state became accessible while the I/O was queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	spdk_delay_us(1000000);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_io_path_error(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
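
	/* (Advancing the clock by the admin queue poll period, as done
	 * throughout these tests, lets the attach sequence finish its
	 * admin-queue processing before the test proceeds.)
	 */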
4518 4519 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4520 poll_threads(); 4521 4522 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4523 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4524 4525 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4526 CU_ASSERT(nvme_ctrlr1 != NULL); 4527 4528 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4529 CU_ASSERT(bdev != NULL); 4530 4531 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4532 CU_ASSERT(nvme_ns1 != NULL); 4533 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4534 4535 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4536 ut_bdev_io_set_buf(bdev_io); 4537 4538 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4539 4540 ch = spdk_get_io_channel(bdev); 4541 SPDK_CU_ASSERT_FATAL(ch != NULL); 4542 4543 nbdev_ch = spdk_io_channel_get_ctx(ch); 4544 4545 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4546 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4547 4548 nvme_qpair1 = io_path1->qpair; 4549 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4550 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4551 4552 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4553 4554 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4555 bdev_io->internal.in_submit_request = true; 4556 4557 bdev_nvme_submit_request(ch, bdev_io); 4558 4559 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4560 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4561 4562 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4563 SPDK_CU_ASSERT_FATAL(req != NULL); 4564 4565 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4566 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4567 req->cpl.status.dnr = 1; 4568 4569 poll_thread_times(0, 1); 4570 4571 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4572 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4573 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4574 4575 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4576 bdev_io->internal.in_submit_request = true; 4577 4578 bdev_nvme_submit_request(ch, bdev_io); 4579 4580 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4581 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4582 4583 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4584 SPDK_CU_ASSERT_FATAL(req != NULL); 4585 4586 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4587 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4588 4589 poll_thread_times(0, 1); 4590 4591 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4592 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4593 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4594 4595 poll_threads(); 4596 4597 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4598 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4599 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4600 4601 /* Add io_path2 dynamically, and create a multipath configuration. 
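	 * (The new path is attached to the already-open nvme_bdev_channel:
	 * io_path2 becomes visible in nbdev_ch below without the channel
	 * being re-acquired.)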
*/ 4602 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4603 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4604 4605 ctrlr2->ns[0].uuid = &uuid1; 4606 4607 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4608 attach_ctrlr_done, NULL, &opts, NULL, true); 4609 CU_ASSERT(rc == 0); 4610 4611 spdk_delay_us(1000); 4612 poll_threads(); 4613 4614 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4615 poll_threads(); 4616 4617 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4618 CU_ASSERT(nvme_ctrlr2 != NULL); 4619 4620 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4621 CU_ASSERT(nvme_ns2 != NULL); 4622 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4623 4624 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4625 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4626 4627 nvme_qpair2 = io_path2->qpair; 4628 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4629 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4630 4631 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4632 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4633 * So after a retry, I/O is submitted to io_path2 and should succeed. 4634 */ 4635 bdev_io->internal.in_submit_request = true; 4636 4637 bdev_nvme_submit_request(ch, bdev_io); 4638 4639 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4640 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4641 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4642 4643 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4644 SPDK_CU_ASSERT_FATAL(req != NULL); 4645 4646 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4647 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4648 4649 poll_thread_times(0, 1); 4650 4651 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4652 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4653 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4654 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4655 4656 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4657 nvme_qpair1->qpair = NULL; 4658 4659 poll_threads(); 4660 4661 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4662 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4663 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4664 4665 free(bdev_io); 4666 4667 spdk_put_io_channel(ch); 4668 4669 poll_threads(); 4670 4671 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4672 CU_ASSERT(rc == 0); 4673 4674 poll_threads(); 4675 spdk_delay_us(1000); 4676 poll_threads(); 4677 4678 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4679 4680 g_opts.bdev_retry_count = 0; 4681 } 4682 4683 static void 4684 test_retry_io_count(void) 4685 { 4686 struct nvme_path_id path = {}; 4687 struct spdk_nvme_ctrlr *ctrlr; 4688 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4689 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4690 struct nvme_ctrlr *nvme_ctrlr; 4691 const int STRING_SIZE = 32; 4692 const char *attached_names[STRING_SIZE]; 4693 struct nvme_bdev *bdev; 4694 struct nvme_ns *nvme_ns; 4695 struct spdk_bdev_io *bdev_io; 4696 struct nvme_bdev_io *bio; 4697 struct spdk_io_channel *ch; 4698 struct nvme_bdev_channel *nbdev_ch; 4699 struct nvme_io_path *io_path; 4700 struct nvme_qpair *nvme_qpair; 4701 struct ut_nvme_req *req; 4702 int rc; 4703 4704 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4705 ut_init_trid(&path.trid); 4706 4707 set_thread(0); 4708 4709 
ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4710 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4711 4712 g_ut_attach_ctrlr_status = 0; 4713 g_ut_attach_bdev_count = 1; 4714 4715 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4716 attach_ctrlr_done, NULL, &opts, NULL, false); 4717 CU_ASSERT(rc == 0); 4718 4719 spdk_delay_us(1000); 4720 poll_threads(); 4721 4722 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4723 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4724 4725 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 4726 CU_ASSERT(nvme_ctrlr != NULL); 4727 4728 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4729 CU_ASSERT(bdev != NULL); 4730 4731 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4732 CU_ASSERT(nvme_ns != NULL); 4733 4734 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4735 ut_bdev_io_set_buf(bdev_io); 4736 4737 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4738 4739 ch = spdk_get_io_channel(bdev); 4740 SPDK_CU_ASSERT_FATAL(ch != NULL); 4741 4742 nbdev_ch = spdk_io_channel_get_ctx(ch); 4743 4744 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4745 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4746 4747 nvme_qpair = io_path->qpair; 4748 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4749 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4750 4751 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4752 4753 /* If I/O is aborted by request, it should not be retried. */ 4754 g_opts.bdev_retry_count = 1; 4755 4756 bdev_io->internal.in_submit_request = true; 4757 4758 bdev_nvme_submit_request(ch, bdev_io); 4759 4760 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4761 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4762 4763 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4764 SPDK_CU_ASSERT_FATAL(req != NULL); 4765 4766 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4767 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4768 4769 poll_thread_times(0, 1); 4770 4771 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4772 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4773 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4774 4775 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4776 * the failed I/O should not be retried. 4777 */ 4778 g_opts.bdev_retry_count = 4; 4779 4780 bdev_io->internal.in_submit_request = true; 4781 4782 bdev_nvme_submit_request(ch, bdev_io); 4783 4784 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4785 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4786 4787 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4788 SPDK_CU_ASSERT_FATAL(req != NULL); 4789 4790 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4791 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4792 bio->retry_count = 4; 4793 4794 poll_thread_times(0, 1); 4795 4796 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4797 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4798 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4799 4800 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. 
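	 * (A bdev_retry_count of -1 disables the retry-count limit, so the I/O
	 * is requeued even though bio->retry_count (4) is not less than the
	 * configured count.)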
*/ 4801 g_opts.bdev_retry_count = -1; 4802 4803 bdev_io->internal.in_submit_request = true; 4804 4805 bdev_nvme_submit_request(ch, bdev_io); 4806 4807 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4808 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4809 4810 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4811 SPDK_CU_ASSERT_FATAL(req != NULL); 4812 4813 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4814 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4815 bio->retry_count = 4; 4816 4817 poll_thread_times(0, 1); 4818 4819 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4820 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4821 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4822 4823 poll_threads(); 4824 4825 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4826 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4827 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4828 4829 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4830 * the failed I/O should be retried. 4831 */ 4832 g_opts.bdev_retry_count = 4; 4833 4834 bdev_io->internal.in_submit_request = true; 4835 4836 bdev_nvme_submit_request(ch, bdev_io); 4837 4838 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4839 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4840 4841 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4842 SPDK_CU_ASSERT_FATAL(req != NULL); 4843 4844 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4845 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4846 bio->retry_count = 3; 4847 4848 poll_thread_times(0, 1); 4849 4850 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4851 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4852 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4853 4854 poll_threads(); 4855 4856 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4857 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4858 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4859 4860 free(bdev_io); 4861 4862 spdk_put_io_channel(ch); 4863 4864 poll_threads(); 4865 4866 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4867 CU_ASSERT(rc == 0); 4868 4869 poll_threads(); 4870 spdk_delay_us(1000); 4871 poll_threads(); 4872 4873 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4874 4875 g_opts.bdev_retry_count = 0; 4876 } 4877 4878 static void 4879 test_concurrent_read_ana_log_page(void) 4880 { 4881 struct spdk_nvme_transport_id trid = {}; 4882 struct spdk_nvme_ctrlr *ctrlr; 4883 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4884 struct nvme_ctrlr *nvme_ctrlr; 4885 const int STRING_SIZE = 32; 4886 const char *attached_names[STRING_SIZE]; 4887 int rc; 4888 4889 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4890 ut_init_trid(&trid); 4891 4892 set_thread(0); 4893 4894 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4895 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4896 4897 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4898 4899 g_ut_attach_ctrlr_status = 0; 4900 g_ut_attach_bdev_count = 1; 4901 4902 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4903 attach_ctrlr_done, NULL, &opts, NULL, false); 4904 CU_ASSERT(rc == 0); 4905 4906 spdk_delay_us(1000); 4907 poll_threads(); 4908 4909 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4910 poll_threads(); 4911 4912 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4913 
SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A following read request should be rejected. */
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	set_thread(1);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A reset request while reading the ANA log page should not be rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	set_thread(0);

	/* It is possible that the target sends an ANA change for inactive namespaces.
	 *
	 * Previously, an assert() was added because this case was considered unlikely.
	 * However, the assert() was hit in a real environment.
	 *
	 * Hence, remove the assert() and add a unit test case.
	 *
	 * Simulate this case by depopulating namespaces and then parsing an ANA
	 * log page created while all namespaces were active.
	 * Then, check if parsing the ANA log page completes successfully.
	 */
	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);

	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_ana_error(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	struct ut_nvme_req *req;
	uint64_t now;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	now = spdk_get_ticks();

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If an I/O gets an ANA error, it should be queued, the corresponding
	 * namespace should be frozen, and its ANA state should be updated.
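	 * (The namespace is "frozen" by setting ana_state_updating; its cached
	 * ANA state is left untouched until the ANA log page has been re-read,
	 * so the queued I/O keeps seeing the INACCESSIBLE state and stays
	 * queued in the meantime.)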
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
	/* I/O should be retried immediately. */
	CU_ASSERT(bio->retry_ticks == now);
	CU_ASSERT(nvme_ns->ana_state_updating == true);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);

	poll_threads();

	/* The namespace is inaccessible, and hence I/O should be queued again. */
	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
	/* I/O should be retried after a second if no I/O path was found but
	 * any I/O path may become available.
	 */
	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());

	/* The namespace should be unfrozen after its ANA state update completes. */
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ns->ana_state_updating == false);
	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	/* Retrying the queued I/O should succeed. */
	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_check_io_error_resiliency_params(void)
{
	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
	 * 3rd parameter is fast_io_fail_timeout_sec.
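	 *
	 * The expected rule, derived from the assertions below: 0 disables a
	 * timeout and -1 means an infinite ctrlr_loss_timeout_sec. When error
	 * recovery is enabled, reconnect_delay_sec must be non-zero, and the
	 * ordering reconnect_delay_sec <= fast_io_fail_timeout_sec <=
	 * ctrlr_loss_timeout_sec must hold for whichever of those are set.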
5139 */ 5140 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5141 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5142 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5143 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5144 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5145 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5146 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5147 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5148 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5149 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5150 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5151 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5152 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5153 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5154 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5155 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5156 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5157 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5158 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5159 } 5160 5161 static void 5162 test_retry_io_if_ctrlr_is_resetting(void) 5163 { 5164 struct nvme_path_id path = {}; 5165 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5166 struct spdk_nvme_ctrlr *ctrlr; 5167 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5168 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5169 struct nvme_ctrlr *nvme_ctrlr; 5170 const int STRING_SIZE = 32; 5171 const char *attached_names[STRING_SIZE]; 5172 struct nvme_bdev *bdev; 5173 struct nvme_ns *nvme_ns; 5174 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5175 struct spdk_io_channel *ch; 5176 struct nvme_bdev_channel *nbdev_ch; 5177 struct nvme_io_path *io_path; 5178 struct nvme_qpair *nvme_qpair; 5179 int rc; 5180 5181 g_opts.bdev_retry_count = 1; 5182 5183 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5184 ut_init_trid(&path.trid); 5185 5186 set_thread(0); 5187 5188 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5189 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5190 5191 g_ut_attach_ctrlr_status = 0; 5192 g_ut_attach_bdev_count = 1; 5193 5194 opts.ctrlr_loss_timeout_sec = -1; 5195 opts.reconnect_delay_sec = 1; 5196 5197 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5198 attach_ctrlr_done, NULL, &dopts, &opts, false); 5199 CU_ASSERT(rc == 0); 5200 5201 spdk_delay_us(1000); 5202 poll_threads(); 5203 5204 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5205 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5206 5207 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5208 CU_ASSERT(nvme_ctrlr != NULL); 5209 5210 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5211 CU_ASSERT(bdev != NULL); 5212 5213 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5214 CU_ASSERT(nvme_ns != NULL); 5215 5216 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5217 ut_bdev_io_set_buf(bdev_io1); 5218 5219 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5220 ut_bdev_io_set_buf(bdev_io2); 5221 5222 ch = 
spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the qpair is disconnected, it is freed and then reconnected via resetting
	 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted
	 * while the nvme_ctrlr is resetting.
	 */
	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;

	poll_thread_times(0, 5);

	CU_ASSERT(nvme_qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr->is_failed == false);

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	spdk_delay_us(1);

	bdev_io2->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io2);

	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(
			  TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx,
				     retry_link)));

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	spdk_delay_us(1);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);
	free(bdev_io2);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_reconnect_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
	nvme_ctrlr->opts.reconnect_delay_sec = 1;

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* The reset should fail and a reconnect timer should be registered. */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	/* A new reset starts from thread 1. */
	set_thread(1);

	/* The reset should cancel the reconnect timer and should start from reconnection.
	 * Then, the reset should fail and a reconnect timer should be registered again.
	 */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	/* Then a reconnect retry should succeed. */
	ctrlr.fail_reset = false;

	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);
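
	/* Third cycle: this time the delayed reconnect retry is allowed to fail
	 * as well, so ctrlr_loss_timeout_sec (2 seconds) eventually expires.
	 */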
	/* The reset should fail and a reconnect timer should be registered. */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	/* Then a reconnect retry should still fail. */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);

	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
	CU_ASSERT(nvme_ctrlr->destruct == true);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_path_id *
ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
		       const struct spdk_nvme_transport_id *trid)
{
	struct nvme_path_id *p;

	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
			break;
		}
	}

	return p;
}

static void
test_retry_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
	nvme_ctrlr->opts.reconnect_delay_sec = 1;

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);

	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1->last_failed_tsc == 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	/* If reset failed and reconnect is scheduled, path_id is switched from trid1 to
	 * trid2.
	 */
	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);

	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);

	/* It is expected that connecting to all of trid1, trid2, and trid3 fails,
	 * and a reconnect timer is started. */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
	CU_ASSERT(path_id1->last_failed_tsc != 0);

	CU_ASSERT(path_id2->last_failed_tsc != 0);
	CU_ASSERT(path_id3->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	/* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is
	 * switched to trid2 but reset is not started.
	 */
	rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true);
	CU_ASSERT(rc == -EALREADY);

	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL);
	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* If reconnect succeeds, trid2 should be the active path_id */
	ctrlr.fail_reset = false;

	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL);
	CU_ASSERT(path_id2->last_failed_tsc == 0);
	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_fail_path(void)
{
	struct nvme_path_id path = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	/* The test scenario is the following.
	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
	 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated.
	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
	 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec
	 *   comes first. The queued I/O is failed.
	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
	 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted.
	 */

	g_opts.bdev_retry_count = 1;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	opts.ctrlr_loss_timeout_sec = 4;
	opts.reconnect_delay_sec = 1;
	opts.fast_io_fail_timeout_sec = 2;

	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	ctrlr_ch = io_path->qpair->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL);
	SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
	ut_bdev_io_set_buf(bdev_io);

	/* Resetting a ctrlr should fail and a reconnect timer should be registered. */
	ctrlr->fail_reset = true;
	ctrlr->is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr->is_failed == true);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->is_failed == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0);
	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);

	/* I/O should be queued. */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	/* After a second, the I/O should be still queued and the ctrlr should be
	 * still recovering.
	 */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->is_failed == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false);

	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
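
	/* Each 1-second delay below lets the reconnect_delay_timer fire once more;
	 * every reconnect attempt keeps failing because fail_reset is still set.
	 */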
	/* After two seconds, fast_io_fail_timeout_sec should expire. */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->is_failed == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);

	/* Then within a second, pending I/O should be failed. */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	/* Another I/O submission should be failed immediately. */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
	 * should be deleted.
	 */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
	CU_ASSERT(nvme_ctrlr->destruct == true);

	spdk_put_io_channel(ch);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(bdev_io);

	g_opts.bdev_retry_count = 0;
}

static void
test_nvme_ns_cmp(void)
{
	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};

	nvme_ns1.id = 0;
	nvme_ns2.id = UINT32_MAX;

	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
}

static void
test_ana_transition(void)
{
	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };

	/* case 1: ANA transition timedout flag is cleared. */
	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
	nvme_ns.ana_transition_timedout = true;

	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);

	/* case 2: ANATT timer is kept. */
	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
			      &nvme_ns, ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);

	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.anatt_timer != NULL);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);
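
	/* cdata.anatt is the ANA transition time in seconds; a timer is armed for
	 * it while a namespace stays in CHANGE state. The remaining cases check
	 * that the timer is stopped, restarted, and finally allowed to expire.
	 */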
	/* case 3: ANATT timer is stopped. */
	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.anatt_timer == NULL);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);

	/* ANATT timer is started. */
	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.anatt_timer != NULL);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	/* ANATT timer is expired. */
	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);

	poll_threads();

	CU_ASSERT(nvme_ns.anatt_timer == NULL);
	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
}

static void
_set_preferred_path_cb(void *cb_arg, int rc)
{
	bool *done = cb_arg;

	*done = true;
}

static void
test_set_preferred_path(void)
{
	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	const struct spdk_nvme_ctrlr_data *cdata;
	bool done;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	ut_init_trid3(&path3.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	ctrlr3->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
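
	/* All three ctrlrs expose a namespace with the same UUID, so they are
	 * aggregated into a single nvme_bdev with three io_paths.
	 */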
	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);

	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
	 * should return io_path to ctrlr2.
	 */

	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
	done = false;

	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);

	poll_threads();
	CU_ASSERT(done == true);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);

	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
	 * acquired, find_io_path() should return io_path to ctrlr3.
	 */

	spdk_put_io_channel(ch);

	poll_threads();

	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
	done = false;

	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);

	poll_threads();
	CU_ASSERT(done == true);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_find_next_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
		.mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN,
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {};
	struct nvme_ctrlr_channel ctrlr_ch2 = {};
	struct nvme_ctrlr_channel ctrlr_ch3 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);
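
	/* The io_path list above is stitched together by hand rather than via
	 * attach, so ANA states and the cached current_io_path can be driven
	 * directly by the assertions below.
	 */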
	/* Test the case where nbdev_ch->current_io_path is set. The case where
	 * current_io_path is NULL is covered by test_find_io_path.
	 */

	nbdev_ch.current_io_path = &io_path2;
	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = &io_path3;
	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	/* Test if next io_path is selected according to rr_min_io */

	nbdev_ch.current_io_path = NULL;
	nbdev_ch.rr_min_io = 2;
	nbdev_ch.rr_counter = 0;
	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
}

static void
test_find_io_path_min_qd(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
		.mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH,
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {};
	struct nvme_ctrlr_channel ctrlr_ch2 = {};
	struct nvme_ctrlr_channel ctrlr_ch3 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, };
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);

	/* Test that, with the least queue depth selector, an ANA optimized path is
	 * preferred, and among paths in the same ANA state the one with the fewest
	 * outstanding requests is chosen.
	 */
	qpair1.num_outstanding_reqs = 2;
	qpair2.num_outstanding_reqs = 1;
	qpair3.num_outstanding_reqs = 0;
	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	qpair2.num_outstanding_reqs = 4;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);
}

static void
test_disable_auto_failback(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	const struct spdk_nvme_ctrlr_data *cdata;
	bool done;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	g_opts.disable_auto_failback = true;

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);

	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
	ctrlr1->fail_reset = true;
	ctrlr1->is_failed = true;

	bdev_nvme_reset_ctrlr(nvme_ctrlr1);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->adminq.is_connected == false);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);

	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
	 * Hence, io_path to ctrlr2 should still be used.
	 */
	ctrlr1->fail_reset = false;

	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(ctrlr1->adminq.is_connected == true);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);

	/* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should
	 * be used again.
	 */

	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
	done = false;

	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);

	poll_threads();
	CU_ASSERT(done == true);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.disable_auto_failback = false;
}

static void
ut_set_multipath_policy_done(void *cb_arg, int rc)
{
	int *done = cb_arg;

	SPDK_CU_ASSERT_FATAL(done != NULL);
	*done = rc;
}

static void
test_set_multipath_policy(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int done;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	g_opts.disable_auto_failback = true;

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;
"nvme0", attached_names, STRING_SIZE, 6341 attach_ctrlr_done, NULL, &dopts, &opts, true); 6342 CU_ASSERT(rc == 0); 6343 6344 spdk_delay_us(1000); 6345 poll_threads(); 6346 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6347 poll_threads(); 6348 6349 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6350 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6351 6352 ctrlr2->ns[0].uuid = &uuid1; 6353 6354 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6355 attach_ctrlr_done, NULL, &dopts, &opts, true); 6356 CU_ASSERT(rc == 0); 6357 6358 spdk_delay_us(1000); 6359 poll_threads(); 6360 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6361 poll_threads(); 6362 6363 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6364 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6365 6366 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6367 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6368 6369 /* If multipath policy is updated before getting any I/O channel, 6370 * an new I/O channel should have the update. 6371 */ 6372 done = -1; 6373 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6374 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6375 ut_set_multipath_policy_done, &done); 6376 poll_threads(); 6377 CU_ASSERT(done == 0); 6378 6379 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6380 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6381 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6382 6383 ch = spdk_get_io_channel(bdev); 6384 SPDK_CU_ASSERT_FATAL(ch != NULL); 6385 nbdev_ch = spdk_io_channel_get_ctx(ch); 6386 6387 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6388 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6389 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6390 6391 /* If multipath policy is updated while a I/O channel is active, 6392 * the update should be applied to the I/O channel immediately. 6393 */ 6394 done = -1; 6395 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6396 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6397 ut_set_multipath_policy_done, &done); 6398 poll_threads(); 6399 CU_ASSERT(done == 0); 6400 6401 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6402 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6403 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6404 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6405 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6406 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6407 6408 spdk_put_io_channel(ch); 6409 6410 poll_threads(); 6411 6412 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6413 CU_ASSERT(rc == 0); 6414 6415 poll_threads(); 6416 spdk_delay_us(1000); 6417 poll_threads(); 6418 6419 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6420 } 6421 6422 static void 6423 test_uuid_generation(void) 6424 { 6425 uint32_t nsid1 = 1, nsid2 = 2; 6426 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6427 char sn3[21] = " "; 6428 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6429 struct spdk_uuid uuid1, uuid2; 6430 int rc; 6431 6432 /* Test case 1: 6433 * Serial numbers are the same, nsids are different. 6434 * Compare two generated UUID - they should be different. 
static void
test_retry_io_to_same_path(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int done;
	int rc;

	g_opts.nvme_ioq_poll_period_us = 1;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	done = -1;
	spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
					    BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done);
	poll_threads();
	CU_ASSERT(done == 0);
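
	/* rr_min_io == 1 means the round-robin selector moves to the next path
	 * after every single I/O.
	 */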
	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
	CU_ASSERT(bdev->rr_min_io == 1);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);
	CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN);
	CU_ASSERT(nbdev_ch->rr_min_io == 1);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);

	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);

	/* The 1st I/O should be submitted to io_path1. */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bio->io_path == io_path1);
	CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1);

	spdk_delay_us(1);

	poll_threads();
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The 2nd I/O should be submitted to io_path2 because the path selection
	 * policy is round-robin.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bio->io_path == io_path2);
	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);

	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set retry count to non-zero. */
	g_opts.bdev_retry_count = 2;

	/* Inject an I/O error. */
	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	/* The 2nd I/O should be queued to nbdev_ch. */
	spdk_delay_us(1);
	poll_thread_times(0, 1);

	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	/* The 2nd I/O should keep caching io_path2. */
	CU_ASSERT(bio->io_path == io_path2);

	/* The 2nd I/O should be submitted to io_path2 again. */
	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bio->io_path == io_path2);
	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1);

	req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Inject an I/O error again. */
	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	req->cpl.status.crd = 1;

	ctrlr2->cdata.crdt[1] = 1;

	/* The 2nd I/O should be queued to nbdev_ch. */
	spdk_delay_us(1);
	poll_thread_times(0, 1);

	CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
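
	/* crd == 1 selects cdata.crdt[1], which is in units of 100 milliseconds,
	 * so this retry is delayed rather than resubmitted immediately.
	 */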
	/* The 2nd I/O should keep caching io_path2. */
	CU_ASSERT(bio->io_path == io_path2);

	/* Detach ctrlr2 dynamically. */
	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL);

	poll_threads();
	spdk_delay_us(100000);
	poll_threads();
	spdk_delay_us(1);
	poll_threads();

	/* The 2nd I/O should succeed via io_path1. */
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bio->io_path == io_path1);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();
	spdk_delay_us(1);
	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.nvme_ioq_poll_period_us = 0;
	g_opts.bdev_retry_count = 0;
}

/* This case verifies a fix for a complex race condition where a failover was
 * lost if the fabric connect command timed out while the controller was being
 * reset.
 */
static void
test_race_between_reset_and_disconnected(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
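
	/* The reset is driven one poller iteration at a time via poll_thread_times()
	 * so that the failover can be injected while resetting is still in progress.
	 */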
	/* Reset starts from thread 1. */
	set_thread(1);

	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->pending_failover == false);

	/* Here is just one poll before _bdev_nvme_reset_complete() is executed.
	 *
	 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric
	 * connect command is executed. If the fabric connect command times out,
	 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until
	 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false.
	 *
	 * Simulate a fabric connect command timeout by calling bdev_nvme_failover_ctrlr().
	 */
	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->pending_failover == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->pending_failover == false);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->pending_failover == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_ctrlr_op_rpc_cb(void *cb_arg, int rc)
{
	int *_rc = (int *)cb_arg;

	SPDK_CU_ASSERT_FATAL(_rc != NULL);
	*_rc = rc;
}

static void
test_ctrlr_op_rpc(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int ctrlr_op_rc;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;
	ctrlr_op_rc = 0;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;
	ctrlr_op_rc = 0;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;
	ctrlr_op_rc = -1;

	nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_op_rc == -1);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_op_rc == 0);

	/* Case 4: invalid operation. */
	nvme_ctrlr_op_rpc(nvme_ctrlr, -1,
			  ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	poll_threads();

	CU_ASSERT(ctrlr_op_rc == -EINVAL);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_bdev_ctrlr_op_rpc(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL;
	struct nvme_path_id *curr_trid1, *curr_trid2;
	struct spdk_io_channel *ch11, *ch12, *ch21, *ch22;
	struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22;
	int ctrlr_op_rc;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr1.active_io_qpairs);
	TAILQ_INIT(&ctrlr2.active_io_qpairs);
	ctrlr1.cdata.cmic.multi_ctrlr = 1;
	ctrlr2.cdata.cmic.multi_ctrlr = 1;
	ctrlr1.cdata.cntlid = 1;
	ctrlr2.cdata.cntlid = 2;
	ctrlr1.adminq.is_connected = true;
	ctrlr2.adminq.is_connected = true;

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL);

	ch11 = spdk_get_io_channel(nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(ch11 != NULL);

	ctrlr_ch11 = spdk_io_channel_get_ctx(ch11);
	CU_ASSERT(ctrlr_ch11->qpair != NULL);

	set_thread(1);

	ch12 = spdk_get_io_channel(nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(ch12 != NULL);

	ctrlr_ch12 = spdk_io_channel_get_ctx(ch12);
	CU_ASSERT(ctrlr_ch12->qpair != NULL);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL);

	ch21 = spdk_get_io_channel(nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(ch21 != NULL);

	ctrlr_ch21 = spdk_io_channel_get_ctx(ch21);
	CU_ASSERT(ctrlr_ch21->qpair != NULL);

	set_thread(1);

	ch22 = spdk_get_io_channel(nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(ch22 != NULL);

	ctrlr_ch22 = spdk_io_channel_get_ctx(ch22);
	CU_ASSERT(ctrlr_ch22->qpair != NULL);
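
	/* nvme_bdev_ctrlr_op_rpc() applies the operation to each ctrlr under the
	 * nbdev_ctrlr in turn: ctrlr1 is reset first, then ctrlr2, and the user
	 * callback fires once after the last ctrlr completes.
	 */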
	/* Reset starts from thread 1. */
	set_thread(1);

	nvme_ctrlr1->resetting = false;
	nvme_ctrlr2->resetting = false;
	curr_trid1->last_failed_tsc = spdk_get_ticks();
	curr_trid2->last_failed_tsc = spdk_get_ticks();
	ctrlr_op_rc = -1;

	nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET,
			       ut_ctrlr_op_rpc_cb, &ctrlr_op_rc);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(ctrlr_ch11->qpair != NULL);
	CU_ASSERT(ctrlr_ch12->qpair != NULL);
	CU_ASSERT(nvme_ctrlr2->resetting == false);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr1.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr1.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(curr_trid1->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr1->resetting == false);
	CU_ASSERT(curr_trid1->last_failed_tsc == 0);
	CU_ASSERT(nvme_ctrlr2->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr2->resetting == false);
	CU_ASSERT(ctrlr_op_rc == 0);

	set_thread(1);

	spdk_put_io_channel(ch12);
	spdk_put_io_channel(ch22);

	set_thread(0);

	spdk_put_io_channel(ch11);
	spdk_put_io_channel(ch21);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_disable_enable_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);
	ctrlr.adminq.is_connected = true;

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);
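
	/* Disable reuses the reset machinery (resetting stays true while qpairs
	 * and the adminq are torn down) but leaves the ctrlr disabled instead of
	 * reconnecting; enable runs the reconnect half.
	 */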
	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Disable starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already disabled. */
	nvme_ctrlr->disabled = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	/* Case 2: ctrlr is already being destructed. */
	nvme_ctrlr->disabled = false;
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 3: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 4: disable completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.adminq.is_connected == false);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	/* Case 5: enable completes successfully. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 6: ctrlr is already enabled. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	set_thread(0);

	/* Case 7: disable cancels delayed reconnect. */
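	/* A reset that fails while reconnect_delay_sec is set schedules a delayed
	 * reconnect; disabling the controller is expected to cancel that pending
	 * timer.
	 */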
	nvme_ctrlr->opts.reconnect_delay_sec = 10;
	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	set_thread(1);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_delete_done(void *ctx, int rc)
{
	int *delete_done_rc = ctx;
	*delete_done_rc = rc;
}

static void
test_delete_ctrlr_done(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int delete_done_rc = 0xDEADBEEF;
	int rc;

	ut_init_trid(&trid);

	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
	CU_ASSERT(rc == 0);

	for (int i = 0; i < 20; i++) {
		poll_threads();
		if (delete_done_rc == 0) {
			break;
		}
		spdk_delay_us(1000);
	}

	CU_ASSERT(delete_done_rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_ns_remove_during_reset(void)
{
	struct nvme_path_id path = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
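
	/* Attach completes asynchronously; after polling, look up the controller,
	 * namespace, and bdev that were created.
	 */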
	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	/* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist,
	 * but nvme_ns->ns should be NULL.
	 */

	CU_ASSERT(ctrlr->ns[0].is_active == true);
	ctrlr->ns[0].is_active = false;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->adminq.is_connected == true);

	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
	CU_ASSERT(nvme_ns->bdev == bdev);
	CU_ASSERT(nvme_ns->ns == NULL);

	/* Then, async event should fill nvme_ns->ns again. */

	ctrlr->ns[0].is_active = true;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
	CU_ASSERT(nvme_ns->bdev == bdev);
	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_io_path_is_current(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
	nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };

	/* io_path1 is deleting */
	io_path1.nbdev_ch = NULL;

	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
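
	/* Restore io_path1 and link all three paths into the channel's list;
	 * the checks below exercise each multipath policy in turn.
	 */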
	io_path1.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
	io_path2.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
	io_path3.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);

	/* active/active: io_path is current if it is available and ANA optimized. */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/active: io_path is not current if it is disconnected even if it is
	 * ANA optimized.
	 */
	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	/* active/passive: io_path is current if it is available and cached.
	 * (only ANA optimized path is cached for active/passive.)
	 */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	nbdev_ch.current_io_path = &io_path2;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/passive: io_path is not current if it is disconnected even if it is cached. */
	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;

	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	/* active/active: non-optimized path is current only if there is no optimized path. */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/passive: current is true if it is the first one when there is no
	 * optimized path.
	 */
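	/* Clear the cached current_io_path so that the evaluation falls back to
	 * list order rather than the cache.
	 */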
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	nbdev_ch.current_io_path = NULL;

	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
	CU_ADD_TEST(suite, test_admin_path);
	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
	CU_ADD_TEST(suite, test_find_io_path);
	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
	CU_ADD_TEST(suite, test_retry_io_count);
	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
	CU_ADD_TEST(suite, test_reconnect_ctrlr);
	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
	CU_ADD_TEST(suite, test_fail_path);
	CU_ADD_TEST(suite, test_nvme_ns_cmp);
	CU_ADD_TEST(suite, test_ana_transition);
	CU_ADD_TEST(suite, test_set_preferred_path);
	CU_ADD_TEST(suite, test_find_next_io_path);
	CU_ADD_TEST(suite, test_find_io_path_min_qd);
	CU_ADD_TEST(suite, test_disable_auto_failback);
	CU_ADD_TEST(suite, test_set_multipath_policy);
	CU_ADD_TEST(suite, test_uuid_generation);
	CU_ADD_TEST(suite, test_retry_io_to_same_path);
	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
	CU_ADD_TEST(suite, test_delete_ctrlr_done);
	CU_ADD_TEST(suite, test_ns_remove_during_reset);
	CU_ADD_TEST(suite, test_io_path_is_current);

	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	num_failures = spdk_ut_run_tests(argc, argv, NULL);

	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	CU_cleanup_registry();

	return num_failures;
}