/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

#include "bdev/nvme/bdev_mdns_client.c"

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0);
DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr),
	    SPDK_ENV_NUMA_ID_ANY);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);

DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request,
		int error_code, const char *msg));
DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *,
	    (struct spdk_jsonrpc_request *request), NULL);
DEFINE_STUB_V(spdk_jsonrpc_end_result,
	      (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w));

DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts,
		size_t opts_size));

DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts,
		size_t opts_size), 0);
DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL);
DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL);

DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0);

DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat,
					enum spdk_bdev_reset_stat_mode mode));
DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total,
				      struct spdk_bdev_io_stat *add));

DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr));
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0);

int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

#define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"

static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN};

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, (
		    struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
		    struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf,
		    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
		    spdk_nvme_req_next_sge_cb next_sge_fn), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

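/* The namespace and ZNS getters below are stubbed with fixed defaults. The
 * tests in this file exercise controller and path management, not I/O
 * geometry, so zero (i.e. "not supported" / "no limit") is sufficient for
 * most of them.
 */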
DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns),
	    SPDK_NVME_16B_GUARD_PI);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *,
	    (const struct spdk_nvme_status *status), NULL);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

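/* bdev module registration and Opal are stubbed out so that the module
 * initialization code pulled in from bdev_nvme.c can run without the real
 * bdev layer.
 */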
DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_accel_append_crc32c, int,
	    (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst,
	     struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx,
	     uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB_V(spdk_accel_sequence_finish,
	      (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg));
DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq));
DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq));

struct ut_nvme_req {
	uint16_t opc;
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	struct spdk_nvme_cpl cpl;
	TAILQ_ENTRY(ut_nvme_req) tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t id;
	bool is_active;
	struct spdk_uuid *uuid;
	enum spdk_nvme_ana_state ana_state;
	enum spdk_nvme_csi csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr *ctrlr;
	uint8_t failure_reason;
	bool is_connected;
	bool in_completion_context;
	bool delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req) outstanding_reqs;
	uint32_t num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq;
	struct spdk_nvme_poll_group *poll_group;
	void *poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t num_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_qpair adminq;
	struct spdk_nvme_ctrlr_data cdata;
	bool attached;
	bool is_failed;
	bool fail_reset;
	bool is_removed;
	struct spdk_nvme_transport_id trid;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
	struct spdk_nvme_ctrlr_opts opts;
};

struct spdk_nvme_poll_group {
	void *ctx;
	struct spdk_nvme_accel_fn_table accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;
	void *cb_ctx;
	spdk_nvme_attach_cb attach_cb;
	struct spdk_nvme_ctrlr *init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair)
{
	return qpair->num_outstanding_reqs;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs =
	TAILQ_HEAD_INITIALIZER(g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

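	/* Attach succeeded. Move the ctrlr to the attached list and notify the
	 * caller just like the real probe path would.
	 */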
	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN);
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}

static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
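	/* Keep the UT bookkeeping consistent with the real poll group: a qpair
	 * is always on exactly one of the connected/disconnected lists, and
	 * poll_group_tailq_head records which one.
	 */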
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

#define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + sizeof(uint32_t))

static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;

	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
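	/* Lay the page out as a controller would: the header first, then one
	 * descriptor (each holding a single NSID) per active namespace.
	 */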
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}

enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns)
{
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_read_ext_called;
int
spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			  uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			  struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_read_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_write_ext_called;
int
spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			   uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_write_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

bool
spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair)
{
	return qpair->is_connected;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

struct spdk_thread *
spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io)
{
	return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io));
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
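	/* The tests inspect internal.status and in_submit_request directly, so
	 * this stub only records the completion state instead of running the
	 * real bdev layer completion logic.
	 */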
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, int rc)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(rc != 0);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->last_failed_tsc = spdk_get_ticks();
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed but ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->last_failed_tsc != 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->last_failed_tsc == 0);

	set_thread(0);

	/* Second, test two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EINPROGRESS);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first, and trid1 was active. The connection to
 * trid1 was disconnected, and resetting the ctrlr failed repeatedly before failover
 * from trid1 to trid2 started. While processing the failed reset, trid3 was added.
 * trid1 should have stayed active, i.e., at the head of the list, until the failover
 * completed. However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection
 * is broken, an I/O qpair may detect the error earlier than the admin qpair. An I/O
 * qpair error invokes reset ctrlr and an admin qpair error invokes failover ctrlr.
 * Hence reset ctrlr may be executed repeatedly before failover is executed. Hence
 * this bug is real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->last_failed_tsc != 0);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
struct nvme_io_path *io_path1, *io_path2; 1910 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1911 int rc; 1912 1913 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1914 ut_init_trid(&trid); 1915 1916 set_thread(0); 1917 1918 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1919 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1920 1921 g_ut_attach_ctrlr_status = 0; 1922 g_ut_attach_bdev_count = 1; 1923 1924 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1925 attach_ctrlr_done, NULL, &opts, NULL, false); 1926 CU_ASSERT(rc == 0); 1927 1928 spdk_delay_us(1000); 1929 poll_threads(); 1930 1931 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1932 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1933 1934 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1935 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1936 1937 ch1 = spdk_get_io_channel(bdev); 1938 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1939 1940 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1941 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1942 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1943 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1944 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1945 1946 set_thread(1); 1947 1948 ch2 = spdk_get_io_channel(bdev); 1949 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1950 1951 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1952 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1953 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1954 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1955 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1956 1957 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1958 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1959 1960 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1961 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1962 1963 /* The first reset request is submitted on thread 1, and the second reset request 1964 * is submitted on thread 0 while processing the first request. 1965 */ 1966 bdev_nvme_submit_request(ch2, first_bdev_io); 1967 1968 poll_thread_times(0, 1); 1969 poll_thread_times(1, 2); 1970 1971 CU_ASSERT(nvme_ctrlr->resetting == true); 1972 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1973 1974 set_thread(0); 1975 1976 bdev_nvme_submit_request(ch1, second_bdev_io); 1977 1978 poll_thread_times(0, 1); 1979 poll_thread_times(1, 1); 1980 poll_thread_times(0, 2); 1981 poll_thread_times(1, 1); 1982 poll_thread_times(0, 1); 1983 1984 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1985 1986 poll_threads(); 1987 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1988 poll_threads(); 1989 1990 CU_ASSERT(nvme_ctrlr->resetting == false); 1991 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1992 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1993 1994 /* The first reset request is submitted on thread 1, and the second reset request 1995 * is submitted on thread 0 while processing the first request. 1996 * 1997 * The difference from the above scenario is that the controller is removed while 1998 * processing the first request. Hence both reset requests should fail. 
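 * In this test, the removal is modeled by setting ctrlr->fail_reset before the final
 * polls, so the in-flight reset fails and the queued reset fails along with it.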
1999 */ 2000 set_thread(1); 2001 2002 bdev_nvme_submit_request(ch2, first_bdev_io); 2003 2004 poll_thread_times(0, 1); 2005 poll_thread_times(1, 2); 2006 2007 CU_ASSERT(nvme_ctrlr->resetting == true); 2008 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 2009 2010 set_thread(0); 2011 2012 bdev_nvme_submit_request(ch1, second_bdev_io); 2013 2014 poll_thread_times(0, 1); 2015 poll_thread_times(1, 1); 2016 poll_thread_times(0, 2); 2017 poll_thread_times(1, 1); 2018 poll_thread_times(0, 1); 2019 2020 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 2021 2022 ctrlr->fail_reset = true; 2023 2024 poll_threads(); 2025 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2026 poll_threads(); 2027 2028 CU_ASSERT(nvme_ctrlr->resetting == false); 2029 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2030 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2031 2032 spdk_put_io_channel(ch1); 2033 2034 set_thread(1); 2035 2036 spdk_put_io_channel(ch2); 2037 2038 poll_threads(); 2039 2040 set_thread(0); 2041 2042 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2043 CU_ASSERT(rc == 0); 2044 2045 poll_threads(); 2046 spdk_delay_us(1000); 2047 poll_threads(); 2048 2049 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2050 2051 free(first_bdev_io); 2052 free(second_bdev_io); 2053 } 2054 2055 static void 2056 test_attach_ctrlr(void) 2057 { 2058 struct spdk_nvme_transport_id trid = {}; 2059 struct spdk_nvme_ctrlr *ctrlr; 2060 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2061 struct nvme_ctrlr *nvme_ctrlr; 2062 const int STRING_SIZE = 32; 2063 const char *attached_names[STRING_SIZE]; 2064 struct nvme_bdev *nbdev; 2065 int rc; 2066 2067 set_thread(0); 2068 2069 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2070 ut_init_trid(&trid); 2071 2072 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2073 * by probe polling. 2074 */ 2075 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2076 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2077 2078 ctrlr->is_failed = true; 2079 g_ut_attach_ctrlr_status = -EIO; 2080 g_ut_attach_bdev_count = 0; 2081 2082 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2083 attach_ctrlr_done, NULL, &opts, NULL, false); 2084 CU_ASSERT(rc == 0); 2085 2086 spdk_delay_us(1000); 2087 poll_threads(); 2088 2089 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2090 2091 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2092 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2093 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2094 2095 g_ut_attach_ctrlr_status = 0; 2096 2097 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2098 attach_ctrlr_done, NULL, &opts, NULL, false); 2099 CU_ASSERT(rc == 0); 2100 2101 spdk_delay_us(1000); 2102 poll_threads(); 2103 2104 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2105 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2106 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2107 2108 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2109 CU_ASSERT(rc == 0); 2110 2111 poll_threads(); 2112 spdk_delay_us(1000); 2113 poll_threads(); 2114 2115 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2116 2117 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2118 * one nvme_bdev is created. 
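 * The created bdev is named after the controller name plus the namespace ID,
 * i.e. "nvme0n1" here.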
2119 */ 2120 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2121 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2122 2123 g_ut_attach_bdev_count = 1; 2124 2125 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2126 attach_ctrlr_done, NULL, &opts, NULL, false); 2127 CU_ASSERT(rc == 0); 2128 2129 spdk_delay_us(1000); 2130 poll_threads(); 2131 2132 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2133 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2134 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2135 2136 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2137 attached_names[0] = NULL; 2138 2139 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2140 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2141 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2142 2143 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2144 CU_ASSERT(rc == 0); 2145 2146 poll_threads(); 2147 spdk_delay_us(1000); 2148 poll_threads(); 2149 2150 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2151 2152 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2153 * created because creating one nvme_bdev failed. 2154 */ 2155 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2156 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2157 2158 g_ut_register_bdev_status = -EINVAL; 2159 g_ut_attach_bdev_count = 0; 2160 2161 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2162 attach_ctrlr_done, NULL, &opts, NULL, false); 2163 CU_ASSERT(rc == 0); 2164 2165 spdk_delay_us(1000); 2166 poll_threads(); 2167 2168 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2169 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2170 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2171 2172 CU_ASSERT(attached_names[0] == NULL); 2173 2174 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2175 CU_ASSERT(rc == 0); 2176 2177 poll_threads(); 2178 spdk_delay_us(1000); 2179 poll_threads(); 2180 2181 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2182 2183 g_ut_register_bdev_status = 0; 2184 } 2185 2186 static void 2187 test_aer_cb(void) 2188 { 2189 struct spdk_nvme_transport_id trid = {}; 2190 struct spdk_nvme_ctrlr *ctrlr; 2191 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2192 struct nvme_ctrlr *nvme_ctrlr; 2193 struct nvme_bdev *bdev; 2194 const int STRING_SIZE = 32; 2195 const char *attached_names[STRING_SIZE]; 2196 union spdk_nvme_async_event_completion event = {}; 2197 struct spdk_nvme_cpl cpl = {}; 2198 int rc; 2199 2200 set_thread(0); 2201 2202 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2203 ut_init_trid(&trid); 2204 2205 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2206 * namespaces are populated. 
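 * The 1st namespace is left unpopulated by clearing ctrlr->ns[0].is_active below.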
2207 */ 2208 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2209 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2210 2211 ctrlr->ns[0].is_active = false; 2212 2213 g_ut_attach_ctrlr_status = 0; 2214 g_ut_attach_bdev_count = 3; 2215 2216 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2217 attach_ctrlr_done, NULL, &opts, NULL, false); 2218 CU_ASSERT(rc == 0); 2219 2220 spdk_delay_us(1000); 2221 poll_threads(); 2222 2223 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2224 poll_threads(); 2225 2226 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2227 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2228 2229 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2230 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2231 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2232 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2233 2234 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2235 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2236 CU_ASSERT(bdev->disk.blockcnt == 1024); 2237 2238 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2239 * change the size of the 4th namespace. 2240 */ 2241 ctrlr->ns[0].is_active = true; 2242 ctrlr->ns[2].is_active = false; 2243 ctrlr->nsdata[3].nsze = 2048; 2244 2245 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2246 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2247 cpl.cdw0 = event.raw; 2248 2249 aer_cb(nvme_ctrlr, &cpl); 2250 2251 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2252 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2253 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2254 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2255 CU_ASSERT(bdev->disk.blockcnt == 2048); 2256 2257 /* Change ANA state of active namespaces. 
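 * An ANA change AER causes the ANA log page to be re-read over the admin queue, so
 * the new states become visible only after the admin queue is polled.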
*/ 2258 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2259 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2260 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2261 2262 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2263 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2264 cpl.cdw0 = event.raw; 2265 2266 aer_cb(nvme_ctrlr, &cpl); 2267 2268 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2269 poll_threads(); 2270 2271 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2272 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2273 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2274 2275 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2276 CU_ASSERT(rc == 0); 2277 2278 poll_threads(); 2279 spdk_delay_us(1000); 2280 poll_threads(); 2281 2282 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2283 } 2284 2285 static void 2286 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2287 enum spdk_bdev_io_type io_type) 2288 { 2289 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2290 struct nvme_io_path *io_path; 2291 struct spdk_nvme_qpair *qpair; 2292 2293 io_path = bdev_nvme_find_io_path(nbdev_ch); 2294 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2295 qpair = io_path->qpair->qpair; 2296 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2297 2298 bdev_io->type = io_type; 2299 bdev_io->internal.in_submit_request = true; 2300 2301 bdev_nvme_submit_request(ch, bdev_io); 2302 2303 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2304 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2305 2306 poll_threads(); 2307 2308 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2309 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2310 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2311 } 2312 2313 static void 2314 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2315 enum spdk_bdev_io_type io_type) 2316 { 2317 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2318 struct nvme_io_path *io_path; 2319 struct spdk_nvme_qpair *qpair; 2320 2321 io_path = bdev_nvme_find_io_path(nbdev_ch); 2322 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2323 qpair = io_path->qpair->qpair; 2324 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2325 2326 bdev_io->type = io_type; 2327 bdev_io->internal.in_submit_request = true; 2328 2329 bdev_nvme_submit_request(ch, bdev_io); 2330 2331 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2332 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2333 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2334 } 2335 2336 static void 2337 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2338 { 2339 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2340 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2341 struct ut_nvme_req *req; 2342 struct nvme_io_path *io_path; 2343 struct spdk_nvme_qpair *qpair; 2344 2345 io_path = bdev_nvme_find_io_path(nbdev_ch); 2346 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2347 qpair = io_path->qpair->qpair; 2348 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2349 2350 /* Only compare and write now. 
*/ 2351 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2352 bdev_io->internal.in_submit_request = true; 2353 2354 bdev_nvme_submit_request(ch, bdev_io); 2355 2356 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2357 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2358 CU_ASSERT(bio->first_fused_submitted == true); 2359 2360 /* First outstanding request is compare operation. */ 2361 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2362 SPDK_CU_ASSERT_FATAL(req != NULL); 2363 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2364 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2365 2366 poll_threads(); 2367 2368 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2369 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2370 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2371 } 2372 2373 static void 2374 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2375 struct spdk_nvme_ctrlr *ctrlr) 2376 { 2377 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2378 bdev_io->internal.in_submit_request = true; 2379 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2380 2381 bdev_nvme_submit_request(ch, bdev_io); 2382 2383 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2384 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2385 2386 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2387 poll_thread_times(1, 1); 2388 2389 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2390 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2391 2392 poll_thread_times(0, 1); 2393 2394 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2395 } 2396 2397 static void 2398 test_submit_nvme_cmd(void) 2399 { 2400 struct spdk_nvme_transport_id trid = {}; 2401 struct spdk_nvme_ctrlr *ctrlr; 2402 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2403 struct nvme_ctrlr *nvme_ctrlr; 2404 const int STRING_SIZE = 32; 2405 const char *attached_names[STRING_SIZE]; 2406 struct nvme_bdev *bdev; 2407 struct spdk_bdev_io *bdev_io; 2408 struct spdk_io_channel *ch; 2409 int rc; 2410 2411 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2412 ut_init_trid(&trid); 2413 2414 set_thread(1); 2415 2416 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2417 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2418 2419 g_ut_attach_ctrlr_status = 0; 2420 g_ut_attach_bdev_count = 1; 2421 2422 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2423 attach_ctrlr_done, NULL, &opts, NULL, false); 2424 CU_ASSERT(rc == 0); 2425 2426 spdk_delay_us(1000); 2427 poll_threads(); 2428 2429 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2430 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2431 2432 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2433 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2434 2435 set_thread(0); 2436 2437 ch = spdk_get_io_channel(bdev); 2438 SPDK_CU_ASSERT_FATAL(ch != NULL); 2439 2440 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2441 2442 bdev_io->u.bdev.iovs = NULL; 2443 2444 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2445 2446 ut_bdev_io_set_buf(bdev_io); 2447 2448 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2449 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2450 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2451 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2452 2453 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2454 2455 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2456 2457 /* Verify that ext NVME API is called when data is described by 
memory domain */ 2458 g_ut_read_ext_called = false; 2459 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2460 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2461 CU_ASSERT(g_ut_read_ext_called == true); 2462 g_ut_read_ext_called = false; 2463 bdev_io->u.bdev.memory_domain = NULL; 2464 2465 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2466 2467 free(bdev_io); 2468 2469 spdk_put_io_channel(ch); 2470 2471 poll_threads(); 2472 2473 set_thread(1); 2474 2475 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2476 CU_ASSERT(rc == 0); 2477 2478 poll_threads(); 2479 spdk_delay_us(1000); 2480 poll_threads(); 2481 2482 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2483 } 2484 2485 static void 2486 test_add_remove_trid(void) 2487 { 2488 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2489 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2490 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2491 struct nvme_ctrlr *nvme_ctrlr = NULL; 2492 const int STRING_SIZE = 32; 2493 const char *attached_names[STRING_SIZE]; 2494 struct nvme_path_id *ctrid; 2495 int rc; 2496 2497 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2498 ut_init_trid(&path1.trid); 2499 ut_init_trid2(&path2.trid); 2500 ut_init_trid3(&path3.trid); 2501 2502 set_thread(0); 2503 2504 g_ut_attach_ctrlr_status = 0; 2505 g_ut_attach_bdev_count = 0; 2506 2507 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2508 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2509 2510 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2511 attach_ctrlr_done, NULL, &opts, NULL, false); 2512 CU_ASSERT(rc == 0); 2513 2514 spdk_delay_us(1000); 2515 poll_threads(); 2516 2517 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2518 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2519 2520 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2521 2522 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2523 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2524 2525 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2526 attach_ctrlr_done, NULL, &opts, NULL, false); 2527 CU_ASSERT(rc == 0); 2528 2529 spdk_delay_us(1000); 2530 poll_threads(); 2531 2532 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2533 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2534 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2535 break; 2536 } 2537 } 2538 CU_ASSERT(ctrid != NULL); 2539 2540 /* trid3 is not in the registered list. */ 2541 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2542 CU_ASSERT(rc == -ENXIO); 2543 2544 /* trid2 is not used, and simply removed. 
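 * Removing an unused trid neither triggers a controller reset nor destroys the
 * nvme_ctrlr.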
*/
2545 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2546 CU_ASSERT(rc == 0);
2547 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2548 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2549 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2550 }
2551
2552 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false);
2553 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);
2554
2555 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
2556 attach_ctrlr_done, NULL, &opts, NULL, false);
2557 CU_ASSERT(rc == 0);
2558
2559 spdk_delay_us(1000);
2560 poll_threads();
2561
2562 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2563 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2564 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) {
2565 break;
2566 }
2567 }
2568 CU_ASSERT(ctrid != NULL);
2569
2570 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully.
2571 * If we add path2 again, path2 should be inserted between path1 and path3.
2572 * Then, we remove path2. Since it is not in use, it is simply removed.
2573 */
2574 ctrid->last_failed_tsc = spdk_get_ticks() + 1;
2575
2576 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2577 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2578
2579 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2580 attach_ctrlr_done, NULL, &opts, NULL, false);
2581 CU_ASSERT(rc == 0);
2582
2583 spdk_delay_us(1000);
2584 poll_threads();
2585
2586 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2587
2588 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link);
2589 SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2590 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0);
2591
2592 ctrid = TAILQ_NEXT(ctrid, link);
2593 SPDK_CU_ASSERT_FATAL(ctrid != NULL);
2594 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0);
2595
2596 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
2597 CU_ASSERT(rc == 0);
2598 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2599 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2600 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0);
2601 }
2602
2603 /* path1 is currently used and path3 is an alternative path.
2604 * If we remove path1, the active path is changed to path3.
2605 */
2606 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
2607 CU_ASSERT(rc == 0);
2608 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2609 CU_ASSERT(nvme_ctrlr->resetting == true);
2610 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2611 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0);
2612 }
2613 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0);
2614
2615 poll_threads();
2616 spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2617 poll_threads();
2618
2619 CU_ASSERT(nvme_ctrlr->resetting == false);
2620
2621 /* path3 is the current and only path. If we remove path3, the corresponding
2622 * nvme_ctrlr is removed.
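 * The removal is asynchronous: nvme_ctrlr_get_by_name() still finds the ctrlr until
 * the threads are polled.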
2623 */
2624 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL);
2625 CU_ASSERT(rc == 0);
2626 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2627
2628 poll_threads();
2629 spdk_delay_us(1000);
2630 poll_threads();
2631
2632 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2633
2634 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false);
2635 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);
2636
2637 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
2638 attach_ctrlr_done, NULL, &opts, NULL, false);
2639 CU_ASSERT(rc == 0);
2640
2641 spdk_delay_us(1000);
2642 poll_threads();
2643
2644 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2645 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2646
2647 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2648
2649 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false);
2650 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);
2651
2652 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
2653 attach_ctrlr_done, NULL, &opts, NULL, false);
2654 CU_ASSERT(rc == 0);
2655
2656 spdk_delay_us(1000);
2657 poll_threads();
2658
2659 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0);
2660 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) {
2661 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) {
2662 break;
2663 }
2664 }
2665 CU_ASSERT(ctrid != NULL);
2666
2667 /* If trid is not specified, the nvme_ctrlr itself is removed. */
2668 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2669 CU_ASSERT(rc == 0);
2670 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
2671
2672 poll_threads();
2673 spdk_delay_us(1000);
2674 poll_threads();
2675
2676 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2677 }
2678
2679 static void
2680 test_abort(void)
2681 {
2682 struct spdk_nvme_transport_id trid = {};
2683 struct spdk_bdev_nvme_ctrlr_opts opts = {};
2684 struct spdk_nvme_ctrlr *ctrlr;
2685 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
2686 struct nvme_ctrlr *nvme_ctrlr;
2687 const int STRING_SIZE = 32;
2688 const char *attached_names[STRING_SIZE];
2689 struct nvme_bdev *bdev;
2690 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io;
2691 struct spdk_io_channel *ch1, *ch2;
2692 struct nvme_bdev_channel *nbdev_ch1;
2693 struct nvme_io_path *io_path1;
2694 struct nvme_qpair *nvme_qpair1;
2695 int rc;
2696
2697 /* Create a ctrlr on thread 1 and submit I/O and admin requests to be aborted on
2698 * thread 0. Abort requests for the I/O commands are submitted on thread 0, and abort
2699 * requests for the admin commands are submitted on thread 1. Both should succeed.
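 * Note that abort itself is issued as an admin command, so each abort completes only
 * after the admin queue is polled.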
2700 */ 2701 2702 ut_init_trid(&trid); 2703 2704 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2705 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2706 2707 g_ut_attach_ctrlr_status = 0; 2708 g_ut_attach_bdev_count = 1; 2709 2710 set_thread(1); 2711 2712 opts.ctrlr_loss_timeout_sec = -1; 2713 opts.reconnect_delay_sec = 1; 2714 2715 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2716 attach_ctrlr_done, NULL, &dopts, &opts, false); 2717 CU_ASSERT(rc == 0); 2718 2719 spdk_delay_us(1000); 2720 poll_threads(); 2721 2722 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2723 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2724 2725 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2726 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2727 2728 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2729 ut_bdev_io_set_buf(write_io); 2730 2731 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2732 ut_bdev_io_set_buf(fuse_io); 2733 2734 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2735 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2736 2737 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2738 2739 set_thread(0); 2740 2741 ch1 = spdk_get_io_channel(bdev); 2742 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2743 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2744 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2745 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2746 nvme_qpair1 = io_path1->qpair; 2747 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2748 2749 set_thread(1); 2750 2751 ch2 = spdk_get_io_channel(bdev); 2752 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2753 2754 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2755 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2756 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2757 2758 /* Aborting the already completed request should fail. */ 2759 write_io->internal.in_submit_request = true; 2760 bdev_nvme_submit_request(ch1, write_io); 2761 poll_threads(); 2762 2763 CU_ASSERT(write_io->internal.in_submit_request == false); 2764 2765 abort_io->u.abort.bio_to_abort = write_io; 2766 abort_io->internal.in_submit_request = true; 2767 2768 bdev_nvme_submit_request(ch1, abort_io); 2769 2770 poll_threads(); 2771 2772 CU_ASSERT(abort_io->internal.in_submit_request == false); 2773 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2774 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2775 2776 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2777 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2778 2779 admin_io->internal.in_submit_request = true; 2780 bdev_nvme_submit_request(ch1, admin_io); 2781 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2782 poll_threads(); 2783 2784 CU_ASSERT(admin_io->internal.in_submit_request == false); 2785 2786 abort_io->u.abort.bio_to_abort = admin_io; 2787 abort_io->internal.in_submit_request = true; 2788 2789 bdev_nvme_submit_request(ch2, abort_io); 2790 2791 poll_threads(); 2792 2793 CU_ASSERT(abort_io->internal.in_submit_request == false); 2794 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2795 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2796 2797 /* Aborting the write request should succeed. 
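 * The aborted write completes with SPDK_BDEV_IO_STATUS_ABORTED and leaves no
 * outstanding requests on the I/O qpair.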
*/ 2798 write_io->internal.in_submit_request = true; 2799 bdev_nvme_submit_request(ch1, write_io); 2800 2801 CU_ASSERT(write_io->internal.in_submit_request == true); 2802 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2803 2804 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2805 abort_io->u.abort.bio_to_abort = write_io; 2806 abort_io->internal.in_submit_request = true; 2807 2808 bdev_nvme_submit_request(ch1, abort_io); 2809 2810 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2811 poll_threads(); 2812 2813 CU_ASSERT(abort_io->internal.in_submit_request == false); 2814 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2815 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2816 CU_ASSERT(write_io->internal.in_submit_request == false); 2817 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2818 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2819 2820 /* Aborting the fuse request should succeed. */ 2821 fuse_io->internal.in_submit_request = true; 2822 bdev_nvme_submit_request(ch1, fuse_io); 2823 2824 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2825 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2826 2827 abort_io->u.abort.bio_to_abort = fuse_io; 2828 abort_io->internal.in_submit_request = true; 2829 2830 bdev_nvme_submit_request(ch1, abort_io); 2831 2832 spdk_delay_us(10000); 2833 poll_threads(); 2834 2835 CU_ASSERT(abort_io->internal.in_submit_request == false); 2836 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2837 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2838 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2839 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2840 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2841 2842 /* Aborting the admin request should succeed. */ 2843 admin_io->internal.in_submit_request = true; 2844 bdev_nvme_submit_request(ch1, admin_io); 2845 2846 CU_ASSERT(admin_io->internal.in_submit_request == true); 2847 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2848 2849 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2850 abort_io->u.abort.bio_to_abort = admin_io; 2851 abort_io->internal.in_submit_request = true; 2852 2853 bdev_nvme_submit_request(ch2, abort_io); 2854 2855 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2856 poll_threads(); 2857 2858 CU_ASSERT(abort_io->internal.in_submit_request == false); 2859 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2860 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2861 CU_ASSERT(admin_io->internal.in_submit_request == false); 2862 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2863 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2864 2865 set_thread(0); 2866 2867 /* If qpair is disconnected, it is freed and then reconnected via resetting 2868 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2869 * while resetting the nvme_ctrlr. 
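 * While the reset is in progress, the queued I/O sits on the bdev channel's
 * retry_io_list, and aborting it there succeeds immediately without an admin command.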
2870 */
2871 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
2872
2873 poll_thread_times(0, 3);
2874
2875 CU_ASSERT(nvme_qpair1->qpair == NULL);
2876 CU_ASSERT(nvme_ctrlr->resetting == true);
2877
2878 write_io->internal.in_submit_request = true;
2879
2880 bdev_nvme_submit_request(ch1, write_io);
2881
2882 CU_ASSERT(write_io->internal.in_submit_request == true);
2883 CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));
2884
2885 /* Aborting the queued write request should succeed immediately. */
2886 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
2887 abort_io->u.abort.bio_to_abort = write_io;
2888 abort_io->internal.in_submit_request = true;
2889
2890 bdev_nvme_submit_request(ch1, abort_io);
2891
2892 CU_ASSERT(abort_io->internal.in_submit_request == false);
2893 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
2894 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
2895 CU_ASSERT(write_io->internal.in_submit_request == false);
2896 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
2897
2898 poll_threads();
2899 spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
2900 poll_threads();
2901
2902 spdk_put_io_channel(ch1);
2903
2904 set_thread(1);
2905
2906 spdk_put_io_channel(ch2);
2907
2908 poll_threads();
2909
2910 free(write_io);
2911 free(fuse_io);
2912 free(admin_io);
2913 free(abort_io);
2914
2915 set_thread(1);
2916
2917 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2918 CU_ASSERT(rc == 0);
2919
2920 poll_threads();
2921 spdk_delay_us(1000);
2922 poll_threads();
2923
2924 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2925 }
2926
2927 static void
2928 test_get_io_qpair(void)
2929 {
2930 struct spdk_nvme_transport_id trid = {};
2931 struct spdk_nvme_ctrlr ctrlr = {};
2932 struct nvme_ctrlr *nvme_ctrlr = NULL;
2933 struct spdk_io_channel *ch;
2934 struct nvme_ctrlr_channel *ctrlr_ch;
2935 struct spdk_nvme_qpair *qpair;
2936 int rc;
2937
2938 ut_init_trid(&trid);
2939 TAILQ_INIT(&ctrlr.active_io_qpairs);
2940
2941 set_thread(0);
2942
2943 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
2944 CU_ASSERT(rc == 0);
2945
2946 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
2947 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
2948
2949 ch = spdk_get_io_channel(nvme_ctrlr);
2950 SPDK_CU_ASSERT_FATAL(ch != NULL);
2951 ctrlr_ch = spdk_io_channel_get_ctx(ch);
2952 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
2953
2954 qpair = bdev_nvme_get_io_qpair(ch);
2955 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair);
2956
2957 spdk_put_io_channel(ch);
2958
2959 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
2960 CU_ASSERT(rc == 0);
2961
2962 poll_threads();
2963 spdk_delay_us(1000);
2964 poll_threads();
2965
2966 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
2967 }
2968
2969 /* Test a scenario where the bdev subsystem starts shutting down while NVMe bdevs
2970 * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a test
2971 * case to avoid regression for this scenario. spdk_bdev_unregister() calls
2972 * bdev_nvme_destruct() in the end, so call bdev_nvme_destruct() directly here.
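 * After bdev_nvme_destruct() returns and the threads are polled, each nvme_ns is
 * detached from its bdev (nvme_ns->bdev becomes NULL); the nvme_ctrlr itself is
 * destructed separately afterwards.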
2973 */ 2974 static void 2975 test_bdev_unregister(void) 2976 { 2977 struct spdk_nvme_transport_id trid = {}; 2978 struct spdk_nvme_ctrlr *ctrlr; 2979 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2980 struct nvme_ctrlr *nvme_ctrlr; 2981 struct nvme_ns *nvme_ns1, *nvme_ns2; 2982 const int STRING_SIZE = 32; 2983 const char *attached_names[STRING_SIZE]; 2984 struct nvme_bdev *bdev1, *bdev2; 2985 int rc; 2986 2987 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2988 ut_init_trid(&trid); 2989 2990 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2991 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2992 2993 g_ut_attach_ctrlr_status = 0; 2994 g_ut_attach_bdev_count = 2; 2995 2996 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2997 attach_ctrlr_done, NULL, &opts, NULL, false); 2998 CU_ASSERT(rc == 0); 2999 3000 spdk_delay_us(1000); 3001 poll_threads(); 3002 3003 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3004 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3005 3006 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 3007 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3008 3009 bdev1 = nvme_ns1->bdev; 3010 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3011 3012 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 3013 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3014 3015 bdev2 = nvme_ns2->bdev; 3016 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3017 3018 bdev_nvme_destruct(&bdev1->disk); 3019 bdev_nvme_destruct(&bdev2->disk); 3020 3021 poll_threads(); 3022 3023 CU_ASSERT(nvme_ns1->bdev == NULL); 3024 CU_ASSERT(nvme_ns2->bdev == NULL); 3025 3026 nvme_ctrlr->destruct = true; 3027 _nvme_ctrlr_destruct(nvme_ctrlr); 3028 3029 poll_threads(); 3030 spdk_delay_us(1000); 3031 poll_threads(); 3032 3033 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3034 } 3035 3036 static void 3037 test_compare_ns(void) 3038 { 3039 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 3040 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 3041 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 3042 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3043 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3044 3045 /* No IDs are defined. */ 3046 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3047 3048 /* Only EUI64 are defined and not matched. */ 3049 nsdata1.eui64 = 0xABCDEF0123456789; 3050 nsdata2.eui64 = 0xBBCDEF0123456789; 3051 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3052 3053 /* Only EUI64 are defined and matched. */ 3054 nsdata2.eui64 = 0xABCDEF0123456789; 3055 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3056 3057 /* Only NGUID are defined and not matched. */ 3058 nsdata1.eui64 = 0x0; 3059 nsdata2.eui64 = 0x0; 3060 nsdata1.nguid[0] = 0x12; 3061 nsdata2.nguid[0] = 0x10; 3062 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3063 3064 /* Only NGUID are defined and matched. */ 3065 nsdata2.nguid[0] = 0x12; 3066 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3067 3068 /* Only UUID are defined and not matched. */ 3069 nsdata1.nguid[0] = 0x0; 3070 nsdata2.nguid[0] = 0x0; 3071 ns1.uuid = &uuid1; 3072 ns2.uuid = &uuid2; 3073 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3074 3075 /* Only one UUID is defined. */ 3076 ns1.uuid = NULL; 3077 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3078 3079 /* Only UUID are defined and matched. */ 3080 ns1.uuid = &uuid2; 3081 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3082 3083 /* All EUI64, NGUID, and UUID are defined and matched. 
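 * Even with matching IDs, differing command set identifiers (CSI) must make the
 * comparison fail, which is checked last.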
*/ 3084 nsdata1.eui64 = 0x123456789ABCDEF; 3085 nsdata2.eui64 = 0x123456789ABCDEF; 3086 nsdata1.nguid[15] = 0x34; 3087 nsdata2.nguid[15] = 0x34; 3088 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3089 3090 /* CSI are not matched. */ 3091 ns1.csi = SPDK_NVME_CSI_ZNS; 3092 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3093 } 3094 3095 static void 3096 test_init_ana_log_page(void) 3097 { 3098 struct spdk_nvme_transport_id trid = {}; 3099 struct spdk_nvme_ctrlr *ctrlr; 3100 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3101 struct nvme_ctrlr *nvme_ctrlr; 3102 const int STRING_SIZE = 32; 3103 const char *attached_names[STRING_SIZE]; 3104 int rc; 3105 3106 set_thread(0); 3107 3108 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3109 ut_init_trid(&trid); 3110 3111 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3112 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3113 3114 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3115 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3116 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3117 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3118 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3119 3120 g_ut_attach_ctrlr_status = 0; 3121 g_ut_attach_bdev_count = 5; 3122 3123 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3124 attach_ctrlr_done, NULL, &opts, NULL, false); 3125 CU_ASSERT(rc == 0); 3126 3127 spdk_delay_us(1000); 3128 poll_threads(); 3129 3130 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3131 poll_threads(); 3132 3133 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3134 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3135 3136 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3137 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3138 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3139 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3140 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3141 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3142 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3143 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3144 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3145 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3146 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3147 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3148 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3149 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3150 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3151 3152 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3153 CU_ASSERT(rc == 0); 3154 3155 poll_threads(); 3156 spdk_delay_us(1000); 3157 poll_threads(); 3158 3159 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3160 } 3161 3162 static void 3163 init_accel(void) 3164 { 3165 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3166 sizeof(int), "accel_p"); 3167 } 3168 3169 static void 3170 fini_accel(void) 3171 { 3172 spdk_io_device_unregister(g_accel_p, NULL); 3173 } 3174 3175 static void 3176 test_get_memory_domains(void) 3177 { 3178 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3179 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3180 struct nvme_ns ns_1 = 
{ .ctrlr = &ctrlr_1 };
3181 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 };
3182 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) };
3183 struct spdk_memory_domain *domains[4] = {};
3184 int rc = 0;
3185
3186 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq);
3187
3188 /* nvme controller doesn't have memory domains */
3189 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0);
3190 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3191 CU_ASSERT(rc == 0);
3192 CU_ASSERT(domains[0] == NULL);
3193 CU_ASSERT(domains[1] == NULL);
3194
3195 /* nvme controller has a memory domain */
3196 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1);
3197 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3198 CU_ASSERT(rc == 1);
3199 CU_ASSERT(domains[0] != NULL);
3200 memset(domains, 0, sizeof(domains));
3201
3202 /* multipath, 2 controllers report 1 memory domain each */
3203 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq);
3204
3205 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2);
3206 CU_ASSERT(rc == 2);
3207 CU_ASSERT(domains[0] != NULL);
3208 CU_ASSERT(domains[1] != NULL);
3209 memset(domains, 0, sizeof(domains));
3210
3211 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */
3212 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2);
3213 CU_ASSERT(rc == 2);
3214
3215 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */
3216 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0);
3217 CU_ASSERT(rc == 2);
3218 CU_ASSERT(domains[0] == NULL);
3219 CU_ASSERT(domains[1] == NULL);
3220
3221 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */
3222 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1);
3223 CU_ASSERT(rc == 2);
3224 CU_ASSERT(domains[0] != NULL);
3225 CU_ASSERT(domains[1] == NULL);
3226 memset(domains, 0, sizeof(domains));
3227
3228 /* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */
3229 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3230 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4);
3231 CU_ASSERT(rc == 4);
3232 CU_ASSERT(domains[0] != NULL);
3233 CU_ASSERT(domains[1] != NULL);
3234 CU_ASSERT(domains[2] != NULL);
3235 CU_ASSERT(domains[3] != NULL);
3236 memset(domains, 0, sizeof(domains));
3237
3238 /* multipath, 2 controllers report 2 memory domains each (not possible, just for test).
3239 * Array size is less than the number of memory domains. */
3240 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2);
3241 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3);
3242 CU_ASSERT(rc == 4);
3243 CU_ASSERT(domains[0] != NULL);
3244 CU_ASSERT(domains[1] != NULL);
3245 CU_ASSERT(domains[2] != NULL);
3246 CU_ASSERT(domains[3] == NULL);
3247 memset(domains, 0, sizeof(domains));
3248
3249 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains);
3250 }
3251
3252 static void
3253 test_reconnect_qpair(void)
3254 {
3255 struct spdk_nvme_transport_id trid = {};
3256 struct spdk_nvme_ctrlr *ctrlr;
3257 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
3258 struct nvme_ctrlr *nvme_ctrlr;
3259 const int STRING_SIZE = 32;
3260 const char *attached_names[STRING_SIZE];
3261 struct nvme_bdev *bdev;
3262 struct spdk_io_channel *ch1, *ch2;
3263 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
3264 struct nvme_io_path *io_path1, *io_path2;
3265 struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
3266 int rc;
3267
3268 memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
3269 ut_init_trid(&trid);
3270
3271 set_thread(0);
3272
3273 ctrlr = ut_attach_ctrlr(&trid,
1, false, false); 3274 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3275 3276 g_ut_attach_ctrlr_status = 0; 3277 g_ut_attach_bdev_count = 1; 3278 3279 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3280 attach_ctrlr_done, NULL, &opts, NULL, false); 3281 CU_ASSERT(rc == 0); 3282 3283 spdk_delay_us(1000); 3284 poll_threads(); 3285 3286 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3287 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3288 3289 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3290 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3291 3292 ch1 = spdk_get_io_channel(bdev); 3293 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3294 3295 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3296 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3297 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3298 nvme_qpair1 = io_path1->qpair; 3299 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3300 3301 set_thread(1); 3302 3303 ch2 = spdk_get_io_channel(bdev); 3304 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3305 3306 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3307 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3308 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3309 nvme_qpair2 = io_path2->qpair; 3310 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3311 3312 /* If a qpair is disconnected, it is freed and then reconnected via 3313 * resetting the corresponding nvme_ctrlr. 3314 */ 3315 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3316 ctrlr->is_failed = true; 3317 3318 poll_thread_times(1, 3); 3319 CU_ASSERT(nvme_qpair1->qpair != NULL); 3320 CU_ASSERT(nvme_qpair2->qpair == NULL); 3321 CU_ASSERT(nvme_ctrlr->resetting == true); 3322 3323 poll_thread_times(0, 3); 3324 CU_ASSERT(nvme_qpair1->qpair == NULL); 3325 CU_ASSERT(nvme_qpair2->qpair == NULL); 3326 CU_ASSERT(ctrlr->is_failed == true); 3327 3328 poll_thread_times(1, 2); 3329 poll_thread_times(0, 1); 3330 CU_ASSERT(ctrlr->is_failed == false); 3331 CU_ASSERT(ctrlr->adminq.is_connected == false); 3332 3333 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3334 poll_thread_times(0, 2); 3335 CU_ASSERT(ctrlr->adminq.is_connected == true); 3336 3337 poll_thread_times(0, 1); 3338 poll_thread_times(1, 1); 3339 CU_ASSERT(nvme_qpair1->qpair != NULL); 3340 CU_ASSERT(nvme_qpair2->qpair != NULL); 3341 CU_ASSERT(nvme_ctrlr->resetting == true); 3342 3343 poll_thread_times(0, 2); 3344 poll_thread_times(1, 1); 3345 poll_thread_times(0, 1); 3346 CU_ASSERT(nvme_ctrlr->resetting == false); 3347 3348 poll_threads(); 3349 3350 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3351 * fails, the qpair is just freed. 
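 * With ctrlr->fail_reset set, the controller stays failed and neither I/O qpair is
 * recreated once the reset attempt completes.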
3352 */ 3353 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3354 ctrlr->is_failed = true; 3355 ctrlr->fail_reset = true; 3356 3357 poll_thread_times(1, 3); 3358 CU_ASSERT(nvme_qpair1->qpair != NULL); 3359 CU_ASSERT(nvme_qpair2->qpair == NULL); 3360 CU_ASSERT(nvme_ctrlr->resetting == true); 3361 3362 poll_thread_times(0, 3); 3363 poll_thread_times(1, 1); 3364 CU_ASSERT(nvme_qpair1->qpair == NULL); 3365 CU_ASSERT(nvme_qpair2->qpair == NULL); 3366 CU_ASSERT(ctrlr->is_failed == true); 3367 3368 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3369 poll_thread_times(0, 3); 3370 poll_thread_times(1, 1); 3371 poll_thread_times(0, 1); 3372 CU_ASSERT(ctrlr->is_failed == true); 3373 CU_ASSERT(nvme_ctrlr->resetting == false); 3374 CU_ASSERT(nvme_qpair1->qpair == NULL); 3375 CU_ASSERT(nvme_qpair2->qpair == NULL); 3376 3377 poll_threads(); 3378 3379 spdk_put_io_channel(ch2); 3380 3381 set_thread(0); 3382 3383 spdk_put_io_channel(ch1); 3384 3385 poll_threads(); 3386 3387 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3388 CU_ASSERT(rc == 0); 3389 3390 poll_threads(); 3391 spdk_delay_us(1000); 3392 poll_threads(); 3393 3394 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3395 } 3396 3397 static void 3398 test_create_bdev_ctrlr(void) 3399 { 3400 struct nvme_path_id path1 = {}, path2 = {}; 3401 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3402 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3403 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3404 const int STRING_SIZE = 32; 3405 const char *attached_names[STRING_SIZE]; 3406 int rc; 3407 3408 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3409 ut_init_trid(&path1.trid); 3410 ut_init_trid2(&path2.trid); 3411 3412 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3413 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3414 3415 g_ut_attach_ctrlr_status = 0; 3416 g_ut_attach_bdev_count = 0; 3417 3418 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3419 attach_ctrlr_done, NULL, &opts, NULL, true); 3420 CU_ASSERT(rc == 0); 3421 3422 spdk_delay_us(1000); 3423 poll_threads(); 3424 3425 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3426 poll_threads(); 3427 3428 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3429 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3430 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3431 3432 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3433 g_ut_attach_ctrlr_status = -EINVAL; 3434 3435 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3436 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3437 3438 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3439 3440 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3441 attach_ctrlr_done, NULL, &opts, NULL, true); 3442 CU_ASSERT(rc == 0); 3443 3444 spdk_delay_us(1000); 3445 poll_threads(); 3446 3447 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3448 poll_threads(); 3449 3450 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3451 3452 /* cntlid is not duplicated, and adding the third ctrlr should succeed. 
*/ 3453 g_ut_attach_ctrlr_status = 0; 3454 3455 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3456 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3457 3458 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3459 attach_ctrlr_done, NULL, &opts, NULL, true); 3460 CU_ASSERT(rc == 0); 3461 3462 spdk_delay_us(1000); 3463 poll_threads(); 3464 3465 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3466 poll_threads(); 3467 3468 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3469 3470 /* Delete two ctrlrs at once. */ 3471 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3472 CU_ASSERT(rc == 0); 3473 3474 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3475 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3476 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3477 3478 poll_threads(); 3479 spdk_delay_us(1000); 3480 poll_threads(); 3481 3482 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3483 3484 /* Add two ctrlrs and delete one by one. */ 3485 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3486 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3487 3488 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3489 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3490 3491 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3492 attach_ctrlr_done, NULL, &opts, NULL, true); 3493 CU_ASSERT(rc == 0); 3494 3495 spdk_delay_us(1000); 3496 poll_threads(); 3497 3498 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3499 poll_threads(); 3500 3501 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3502 attach_ctrlr_done, NULL, &opts, NULL, true); 3503 CU_ASSERT(rc == 0); 3504 3505 spdk_delay_us(1000); 3506 poll_threads(); 3507 3508 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3509 poll_threads(); 3510 3511 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3512 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3513 3514 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3515 CU_ASSERT(rc == 0); 3516 3517 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3518 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3519 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3520 3521 poll_threads(); 3522 spdk_delay_us(1000); 3523 poll_threads(); 3524 3525 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3526 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3527 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3528 3529 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3530 CU_ASSERT(rc == 0); 3531 3532 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3533 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3534 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3535 3536 poll_threads(); 3537 spdk_delay_us(1000); 3538 poll_threads(); 3539 3540 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3541 } 3542 3543 static struct nvme_ns * 3544 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3545 { 3546 struct nvme_ns *nvme_ns; 3547 3548 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3549 if (nvme_ns->ctrlr == nvme_ctrlr) { 3550 return nvme_ns; 3551 } 3552 } 3553 3554 return NULL; 3555 } 3556 3557 static void 3558 
test_add_multi_ns_to_bdev(void) 3559 { 3560 struct nvme_path_id path1 = {}, path2 = {}; 3561 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3562 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3563 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3564 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3565 struct nvme_ns *nvme_ns1, *nvme_ns2; 3566 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3567 const int STRING_SIZE = 32; 3568 const char *attached_names[STRING_SIZE]; 3569 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3570 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3571 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3572 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3573 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3574 int rc; 3575 3576 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3577 ut_init_trid(&path1.trid); 3578 ut_init_trid2(&path2.trid); 3579 3580 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3581 3582 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3583 * namespaces are populated. 3584 */ 3585 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3586 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3587 3588 ctrlr1->ns[1].is_active = false; 3589 ctrlr1->ns[4].is_active = false; 3590 ctrlr1->ns[0].uuid = &uuid1; 3591 ctrlr1->ns[2].uuid = &uuid3; 3592 ctrlr1->ns[3].uuid = &uuid4; 3593 3594 g_ut_attach_ctrlr_status = 0; 3595 g_ut_attach_bdev_count = 3; 3596 3597 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3598 attach_ctrlr_done, NULL, &opts, NULL, true); 3599 CU_ASSERT(rc == 0); 3600 3601 spdk_delay_us(1000); 3602 poll_threads(); 3603 3604 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3605 poll_threads(); 3606 3607 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3608 * namespaces are populated. The uuid of 4th namespace is different, and hence 3609 * adding 4th namespace to a bdev should fail. 
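 * Namespaces from different ctrlrs are aggregated into one nvme_bdev only when they
 * are judged identical (cf. bdev_nvme_compare_ns() above), so the mismatched UUID
 * keeps ctrlr2's 4th namespace from being added.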
	 */
	ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[2].is_active = false;
	ctrlr2->ns[4].is_active = false;
	ctrlr2->ns[0].uuid = &uuid1;
	ctrlr2->ns[1].uuid = &uuid2;
	ctrlr2->ns[3].uuid = &uuid44;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL);
	CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL);

	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);
	bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2);
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);
	bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3);
	SPDK_CU_ASSERT_FATAL(bdev3 != NULL);
	bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4);
	SPDK_CU_ASSERT_FATAL(bdev4 != NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL);

	CU_ASSERT(bdev1->ref == 2);
	CU_ASSERT(bdev2->ref == 1);
	CU_ASSERT(bdev3->ref == 1);
	CU_ASSERT(bdev4->ref == 1);

	/* Test if nvme_bdevs can be deleted by deleting the ctrlrs one by one. */
	rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2);

	rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* Test if an nvme_bdev which has a namespace shared between two ctrlrs
	 * can be deleted when the bdev subsystem shuts down.
	 */
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ut_init_trid2(&path2.trid);

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	/* Check if the nvme_bdev has two nvme_ns, one from each ctrlr. */
	nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);
	CU_ASSERT(nvme_ns1->bdev == bdev1);

	nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);
	CU_ASSERT(nvme_ns2->bdev == bdev1);

	/* Delete the nvme_bdev first, as happens when the bdev subsystem shuts down.
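	 *
	 * Note: bdev_nvme_destruct() is the bdev module's destruct callback;
	 * calling it directly here stands in for the bdev layer unregistering
	 * the disk. It detaches each nvme_ns from the nvme_bdev, which is why
	 * both nvme_ns->bdev pointers are expected to become NULL below.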
*/ 3749 bdev_nvme_destruct(&bdev1->disk); 3750 3751 poll_threads(); 3752 3753 CU_ASSERT(nvme_ns1->bdev == NULL); 3754 CU_ASSERT(nvme_ns2->bdev == NULL); 3755 3756 nvme_ctrlr1->destruct = true; 3757 _nvme_ctrlr_destruct(nvme_ctrlr1); 3758 3759 poll_threads(); 3760 spdk_delay_us(1000); 3761 poll_threads(); 3762 3763 nvme_ctrlr2->destruct = true; 3764 _nvme_ctrlr_destruct(nvme_ctrlr2); 3765 3766 poll_threads(); 3767 spdk_delay_us(1000); 3768 poll_threads(); 3769 3770 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3771 } 3772 3773 static void 3774 test_add_multi_io_paths_to_nbdev_ch(void) 3775 { 3776 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3777 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3778 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3779 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3780 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3781 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3782 const int STRING_SIZE = 32; 3783 const char *attached_names[STRING_SIZE]; 3784 struct nvme_bdev *bdev; 3785 struct spdk_io_channel *ch; 3786 struct nvme_bdev_channel *nbdev_ch; 3787 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3788 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3789 int rc; 3790 3791 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3792 ut_init_trid(&path1.trid); 3793 ut_init_trid2(&path2.trid); 3794 ut_init_trid3(&path3.trid); 3795 g_ut_attach_ctrlr_status = 0; 3796 g_ut_attach_bdev_count = 1; 3797 3798 set_thread(1); 3799 3800 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3801 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3802 3803 ctrlr1->ns[0].uuid = &uuid1; 3804 3805 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3806 attach_ctrlr_done, NULL, &opts, NULL, true); 3807 CU_ASSERT(rc == 0); 3808 3809 spdk_delay_us(1000); 3810 poll_threads(); 3811 3812 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3813 poll_threads(); 3814 3815 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3816 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3817 3818 ctrlr2->ns[0].uuid = &uuid1; 3819 3820 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3821 attach_ctrlr_done, NULL, &opts, NULL, true); 3822 CU_ASSERT(rc == 0); 3823 3824 spdk_delay_us(1000); 3825 poll_threads(); 3826 3827 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3828 poll_threads(); 3829 3830 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3831 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3832 3833 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3834 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3835 3836 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3837 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3838 3839 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3840 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3841 3842 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3843 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3844 3845 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3846 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3847 3848 set_thread(0); 3849 3850 ch = spdk_get_io_channel(bdev); 3851 SPDK_CU_ASSERT_FATAL(ch != NULL); 3852 nbdev_ch = spdk_io_channel_get_ctx(ch); 3853 3854 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3855 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3856 3857 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3858 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3859 3860 set_thread(1); 3861 3862 /* Check if I/O path is dynamically added to 
nvme_bdev_channel. */ 3863 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3864 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3865 3866 ctrlr3->ns[0].uuid = &uuid1; 3867 3868 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3869 attach_ctrlr_done, NULL, &opts, NULL, true); 3870 CU_ASSERT(rc == 0); 3871 3872 spdk_delay_us(1000); 3873 poll_threads(); 3874 3875 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3876 poll_threads(); 3877 3878 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn); 3879 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3880 3881 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3882 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3883 3884 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3885 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3886 3887 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3888 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3889 CU_ASSERT(rc == 0); 3890 3891 poll_threads(); 3892 spdk_delay_us(1000); 3893 poll_threads(); 3894 3895 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1); 3896 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3897 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3); 3898 3899 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3900 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3901 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3902 3903 set_thread(0); 3904 3905 spdk_put_io_channel(ch); 3906 3907 poll_threads(); 3908 3909 set_thread(1); 3910 3911 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3912 CU_ASSERT(rc == 0); 3913 3914 poll_threads(); 3915 spdk_delay_us(1000); 3916 poll_threads(); 3917 3918 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3919 } 3920 3921 static void 3922 test_admin_path(void) 3923 { 3924 struct nvme_path_id path1 = {}, path2 = {}; 3925 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3926 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3927 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3928 const int STRING_SIZE = 32; 3929 const char *attached_names[STRING_SIZE]; 3930 struct nvme_bdev *bdev; 3931 struct spdk_io_channel *ch; 3932 struct spdk_bdev_io *bdev_io; 3933 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3934 int rc; 3935 3936 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3937 ut_init_trid(&path1.trid); 3938 ut_init_trid2(&path2.trid); 3939 g_ut_attach_ctrlr_status = 0; 3940 g_ut_attach_bdev_count = 1; 3941 3942 set_thread(0); 3943 3944 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3945 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3946 3947 ctrlr1->ns[0].uuid = &uuid1; 3948 3949 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3950 attach_ctrlr_done, NULL, &opts, NULL, true); 3951 CU_ASSERT(rc == 0); 3952 3953 spdk_delay_us(1000); 3954 poll_threads(); 3955 3956 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3957 poll_threads(); 3958 3959 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3960 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3961 3962 ctrlr2->ns[0].uuid = &uuid1; 3963 3964 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3965 attach_ctrlr_done, NULL, &opts, NULL, true); 3966 CU_ASSERT(rc == 0); 3967 3968 spdk_delay_us(1000); 3969 poll_threads(); 3970 3971 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3972 poll_threads(); 3973 3974 nbdev_ctrlr = 
nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	/* ctrlr1 is failed but ctrlr2 is not. The admin command is
	 * submitted to ctrlr2.
	 */
	ctrlr1->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Both ctrlr1 and ctrlr2 are failed. The admin command fails to be submitted. */
	ctrlr2->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_io_path *
ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
			struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_io_path *io_path;

	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
		if (io_path->qpair->ctrlr == nvme_ctrlr) {
			return io_path;
		}
	}

	return NULL;
}

static void
test_reset_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_path_id *curr_path1, *curr_path2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct nvme_bdev_io *first_bio;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
poll_threads(); 4083 4084 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4085 poll_threads(); 4086 4087 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4088 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4089 4090 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4091 attach_ctrlr_done, NULL, &opts, NULL, true); 4092 CU_ASSERT(rc == 0); 4093 4094 spdk_delay_us(1000); 4095 poll_threads(); 4096 4097 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4098 poll_threads(); 4099 4100 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4101 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4102 4103 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4104 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4105 4106 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4107 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4108 4109 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4110 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4111 4112 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4113 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4114 4115 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4116 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4117 4118 set_thread(0); 4119 4120 ch1 = spdk_get_io_channel(bdev); 4121 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4122 4123 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4124 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4125 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4126 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4127 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4128 4129 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4130 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4131 4132 set_thread(1); 4133 4134 ch2 = spdk_get_io_channel(bdev); 4135 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4136 4137 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4138 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4139 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4140 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4141 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4142 4143 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4144 4145 /* The first reset request from bdev_io is submitted on thread 0. 4146 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4147 * 4148 * A few extra polls are necessary after resetting ctrlr1 to check 4149 * pending reset requests for ctrlr1. 
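	 *
	 * The sequence verified step by step via poll_thread_times() below is:
	 * disconnect ctrlr1's I/O qpairs on both threads, clear is_failed and
	 * reconnect the adminq, reconnect the I/O qpairs on both threads, clear
	 * last_failed_tsc, and only then start the same sequence for ctrlr2.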
4150 */ 4151 ctrlr1->is_failed = true; 4152 curr_path1->last_failed_tsc = spdk_get_ticks(); 4153 ctrlr2->is_failed = true; 4154 curr_path2->last_failed_tsc = spdk_get_ticks(); 4155 4156 set_thread(0); 4157 4158 bdev_nvme_submit_request(ch1, first_bdev_io); 4159 4160 poll_thread_times(0, 1); 4161 poll_thread_times(1, 1); 4162 poll_thread_times(0, 2); 4163 poll_thread_times(1, 1); 4164 poll_thread_times(0, 1); 4165 4166 CU_ASSERT(first_bio->io_path == io_path11); 4167 CU_ASSERT(nvme_ctrlr1->resetting == true); 4168 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4169 4170 poll_thread_times(0, 3); 4171 CU_ASSERT(io_path11->qpair->qpair == NULL); 4172 CU_ASSERT(io_path21->qpair->qpair != NULL); 4173 4174 poll_thread_times(1, 2); 4175 CU_ASSERT(io_path11->qpair->qpair == NULL); 4176 CU_ASSERT(io_path21->qpair->qpair == NULL); 4177 CU_ASSERT(ctrlr1->is_failed == true); 4178 4179 poll_thread_times(0, 1); 4180 CU_ASSERT(nvme_ctrlr1->resetting == true); 4181 CU_ASSERT(ctrlr1->is_failed == false); 4182 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4183 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4184 4185 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4186 poll_thread_times(0, 2); 4187 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4188 4189 poll_thread_times(0, 1); 4190 CU_ASSERT(io_path11->qpair->qpair != NULL); 4191 CU_ASSERT(io_path21->qpair->qpair == NULL); 4192 4193 poll_thread_times(1, 1); 4194 CU_ASSERT(io_path11->qpair->qpair != NULL); 4195 CU_ASSERT(io_path21->qpair->qpair != NULL); 4196 4197 poll_thread_times(0, 2); 4198 CU_ASSERT(nvme_ctrlr1->resetting == true); 4199 poll_thread_times(1, 1); 4200 CU_ASSERT(nvme_ctrlr1->resetting == true); 4201 poll_thread_times(0, 2); 4202 CU_ASSERT(nvme_ctrlr1->resetting == false); 4203 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4204 CU_ASSERT(first_bio->io_path == io_path12); 4205 CU_ASSERT(nvme_ctrlr2->resetting == true); 4206 4207 poll_thread_times(0, 3); 4208 CU_ASSERT(io_path12->qpair->qpair == NULL); 4209 CU_ASSERT(io_path22->qpair->qpair != NULL); 4210 4211 poll_thread_times(1, 2); 4212 CU_ASSERT(io_path12->qpair->qpair == NULL); 4213 CU_ASSERT(io_path22->qpair->qpair == NULL); 4214 CU_ASSERT(ctrlr2->is_failed == true); 4215 4216 poll_thread_times(0, 1); 4217 CU_ASSERT(nvme_ctrlr2->resetting == true); 4218 CU_ASSERT(ctrlr2->is_failed == false); 4219 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4220 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4221 4222 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4223 poll_thread_times(0, 2); 4224 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4225 4226 poll_thread_times(0, 1); 4227 CU_ASSERT(io_path12->qpair->qpair != NULL); 4228 CU_ASSERT(io_path22->qpair->qpair == NULL); 4229 4230 poll_thread_times(1, 2); 4231 CU_ASSERT(io_path12->qpair->qpair != NULL); 4232 CU_ASSERT(io_path22->qpair->qpair != NULL); 4233 4234 poll_thread_times(0, 2); 4235 CU_ASSERT(nvme_ctrlr2->resetting == true); 4236 poll_thread_times(1, 1); 4237 CU_ASSERT(nvme_ctrlr2->resetting == true); 4238 poll_thread_times(0, 2); 4239 CU_ASSERT(first_bio->io_path == NULL); 4240 CU_ASSERT(nvme_ctrlr2->resetting == false); 4241 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4242 4243 poll_threads(); 4244 4245 /* There is a race between two reset requests from bdev_io. 4246 * 4247 * The first reset request is submitted on thread 0, and the second reset 4248 * request is submitted on thread 1 while the first is resetting ctrlr1. 4249 * The second is pending on ctrlr1. 
After the first completes resetting ctrlr1,
	 * both reset requests go to ctrlr2. The first comes earlier than the second.
	 * The second is pending on ctrlr2 again. After the first completes resetting
	 * ctrlr2, both complete successfully.
	 */
	ctrlr1->is_failed = true;
	curr_path1->last_failed_tsc = spdk_get_ticks();
	ctrlr2->is_failed = true;
	curr_path2->last_failed_tsc = spdk_get_ticks();
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);

	set_thread(1);

	bdev_nvme_submit_request(ch2, second_bdev_io);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 2);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio);
	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) ==
		  (struct nvme_bdev_io *)second_bdev_io->driver_ctx);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(curr_path1->last_failed_tsc == 0);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(curr_path2->last_failed_tsc == 0);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_find_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, };
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);

	/* Test that an io_path whose ANA state is not accessible is excluded.
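	 * Here "not accessible" covers the INACCESSIBLE, PERSISTENT_LOSS, and
	 * CHANGE ANA states; only OPTIMIZED and NON_OPTIMIZED paths may serve
	 * I/O.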
*/ 4339 4340 nvme_qpair1.qpair = &qpair1; 4341 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4342 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4343 4344 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4345 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4346 4347 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4348 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4349 4350 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4351 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4352 4353 nbdev_ch.current_io_path = NULL; 4354 4355 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4356 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4357 4358 nbdev_ch.current_io_path = NULL; 4359 4360 /* Test if io_path whose qpair is resetting is excluded. */ 4361 4362 nvme_qpair1.qpair = NULL; 4363 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4364 4365 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4366 4367 /* Test if ANA optimized state or the first found ANA non-optimized state 4368 * is prioritized. 4369 */ 4370 4371 nvme_qpair1.qpair = &qpair1; 4372 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4373 nvme_qpair2.qpair = &qpair2; 4374 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4375 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4376 4377 nbdev_ch.current_io_path = NULL; 4378 4379 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4380 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4381 4382 nbdev_ch.current_io_path = NULL; 4383 } 4384 4385 static void 4386 test_retry_io_if_ana_state_is_updating(void) 4387 { 4388 struct nvme_path_id path = {}; 4389 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 4390 struct spdk_nvme_ctrlr *ctrlr; 4391 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 4392 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4393 struct nvme_ctrlr *nvme_ctrlr; 4394 const int STRING_SIZE = 32; 4395 const char *attached_names[STRING_SIZE]; 4396 struct nvme_bdev *bdev; 4397 struct nvme_ns *nvme_ns; 4398 struct spdk_bdev_io *bdev_io1; 4399 struct spdk_io_channel *ch; 4400 struct nvme_bdev_channel *nbdev_ch; 4401 struct nvme_io_path *io_path; 4402 struct nvme_qpair *nvme_qpair; 4403 int rc; 4404 4405 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4406 ut_init_trid(&path.trid); 4407 4408 set_thread(0); 4409 4410 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4411 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4412 4413 g_ut_attach_ctrlr_status = 0; 4414 g_ut_attach_bdev_count = 1; 4415 4416 opts.ctrlr_loss_timeout_sec = -1; 4417 opts.reconnect_delay_sec = 1; 4418 4419 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4420 attach_ctrlr_done, NULL, &dopts, &opts, false); 4421 CU_ASSERT(rc == 0); 4422 4423 spdk_delay_us(1000); 4424 poll_threads(); 4425 4426 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4427 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4428 4429 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 4430 CU_ASSERT(nvme_ctrlr != NULL); 4431 4432 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4433 CU_ASSERT(bdev != NULL); 4434 4435 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4436 CU_ASSERT(nvme_ns != NULL); 4437 4438 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4439 ut_bdev_io_set_buf(bdev_io1); 4440 4441 ch = spdk_get_io_channel(bdev); 4442 SPDK_CU_ASSERT_FATAL(ch != NULL); 4443 4444 nbdev_ch = spdk_io_channel_get_ctx(ch); 4445 4446 io_path = 
ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If qpair is connected, I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If ANA state of namespace is inaccessible, I/O should be queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nbdev_ch->current_io_path = NULL;

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));

	/* ANA state became accessible while I/O was queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	spdk_delay_us(1000000);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_io_path_error(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
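	/* Note: the extra advance by nvme_adminq_poll_period_us below gives the
	 * adminq poller a chance to run, so that admin-path work queued during
	 * attach (such as reading the ANA log page) completes before the test
	 * inspects the resulting state.
	 */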
4554 4555 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4556 poll_threads(); 4557 4558 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4559 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4560 4561 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4562 CU_ASSERT(nvme_ctrlr1 != NULL); 4563 4564 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4565 CU_ASSERT(bdev != NULL); 4566 4567 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4568 CU_ASSERT(nvme_ns1 != NULL); 4569 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4570 4571 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4572 ut_bdev_io_set_buf(bdev_io); 4573 4574 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4575 4576 ch = spdk_get_io_channel(bdev); 4577 SPDK_CU_ASSERT_FATAL(ch != NULL); 4578 4579 nbdev_ch = spdk_io_channel_get_ctx(ch); 4580 4581 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4582 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4583 4584 nvme_qpair1 = io_path1->qpair; 4585 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4586 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4587 4588 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4589 4590 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4591 bdev_io->internal.in_submit_request = true; 4592 4593 bdev_nvme_submit_request(ch, bdev_io); 4594 4595 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4596 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4597 4598 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4599 SPDK_CU_ASSERT_FATAL(req != NULL); 4600 4601 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4602 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4603 req->cpl.status.dnr = 1; 4604 4605 poll_thread_times(0, 1); 4606 4607 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4608 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4609 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4610 4611 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4612 bdev_io->internal.in_submit_request = true; 4613 4614 bdev_nvme_submit_request(ch, bdev_io); 4615 4616 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4617 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4618 4619 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4620 SPDK_CU_ASSERT_FATAL(req != NULL); 4621 4622 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4623 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4624 4625 poll_thread_times(0, 1); 4626 4627 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4628 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4629 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4630 4631 poll_threads(); 4632 4633 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4634 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4635 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4636 4637 /* Add io_path2 dynamically, and create a multipath configuration. 
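	 *
	 * Because ctrlr2 is created under the same name "nvme0" and exposes a
	 * namespace with the same UUID, it joins the existing nvme_bdev_ctrlr
	 * and its namespace joins the existing bdev, so a second nvme_io_path
	 * appears in the channel that is already open.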
*/ 4638 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4639 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4640 4641 ctrlr2->ns[0].uuid = &uuid1; 4642 4643 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4644 attach_ctrlr_done, NULL, &opts, NULL, true); 4645 CU_ASSERT(rc == 0); 4646 4647 spdk_delay_us(1000); 4648 poll_threads(); 4649 4650 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4651 poll_threads(); 4652 4653 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4654 CU_ASSERT(nvme_ctrlr2 != NULL); 4655 4656 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4657 CU_ASSERT(nvme_ns2 != NULL); 4658 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4659 4660 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4661 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4662 4663 nvme_qpair2 = io_path2->qpair; 4664 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4665 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4666 4667 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4668 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4669 * So after a retry, I/O is submitted to io_path2 and should succeed. 4670 */ 4671 bdev_io->internal.in_submit_request = true; 4672 4673 bdev_nvme_submit_request(ch, bdev_io); 4674 4675 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4676 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4677 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4678 4679 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4680 SPDK_CU_ASSERT_FATAL(req != NULL); 4681 4682 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4683 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4684 4685 poll_thread_times(0, 1); 4686 4687 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4688 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4689 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4690 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4691 4692 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4693 nvme_qpair1->qpair = NULL; 4694 4695 poll_threads(); 4696 4697 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4698 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4699 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4700 4701 free(bdev_io); 4702 4703 spdk_put_io_channel(ch); 4704 4705 poll_threads(); 4706 4707 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4708 CU_ASSERT(rc == 0); 4709 4710 poll_threads(); 4711 spdk_delay_us(1000); 4712 poll_threads(); 4713 4714 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4715 4716 g_opts.bdev_retry_count = 0; 4717 } 4718 4719 static void 4720 test_retry_io_count(void) 4721 { 4722 struct nvme_path_id path = {}; 4723 struct spdk_nvme_ctrlr *ctrlr; 4724 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4725 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4726 struct nvme_ctrlr *nvme_ctrlr; 4727 const int STRING_SIZE = 32; 4728 const char *attached_names[STRING_SIZE]; 4729 struct nvme_bdev *bdev; 4730 struct nvme_ns *nvme_ns; 4731 struct spdk_bdev_io *bdev_io; 4732 struct nvme_bdev_io *bio; 4733 struct spdk_io_channel *ch; 4734 struct nvme_bdev_channel *nbdev_ch; 4735 struct nvme_io_path *io_path; 4736 struct nvme_qpair *nvme_qpair; 4737 struct ut_nvme_req *req; 4738 int rc; 4739 4740 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4741 ut_init_trid(&path.trid); 4742 4743 set_thread(0); 4744 4745 
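	/* Each case below injects a completion status into the outstanding
	 * request via ut_get_outstanding_nvme_request() and then polls one
	 * iteration at a time, so the retry decision for that completion can
	 * be observed in isolation.
	 */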
ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4746 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4747 4748 g_ut_attach_ctrlr_status = 0; 4749 g_ut_attach_bdev_count = 1; 4750 4751 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4752 attach_ctrlr_done, NULL, &opts, NULL, false); 4753 CU_ASSERT(rc == 0); 4754 4755 spdk_delay_us(1000); 4756 poll_threads(); 4757 4758 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4759 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4760 4761 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 4762 CU_ASSERT(nvme_ctrlr != NULL); 4763 4764 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4765 CU_ASSERT(bdev != NULL); 4766 4767 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4768 CU_ASSERT(nvme_ns != NULL); 4769 4770 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4771 ut_bdev_io_set_buf(bdev_io); 4772 4773 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4774 4775 ch = spdk_get_io_channel(bdev); 4776 SPDK_CU_ASSERT_FATAL(ch != NULL); 4777 4778 nbdev_ch = spdk_io_channel_get_ctx(ch); 4779 4780 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4781 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4782 4783 nvme_qpair = io_path->qpair; 4784 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4785 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4786 4787 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4788 4789 /* If I/O is aborted by request, it should not be retried. */ 4790 g_opts.bdev_retry_count = 1; 4791 4792 bdev_io->internal.in_submit_request = true; 4793 4794 bdev_nvme_submit_request(ch, bdev_io); 4795 4796 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4797 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4798 4799 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4800 SPDK_CU_ASSERT_FATAL(req != NULL); 4801 4802 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4803 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4804 4805 poll_thread_times(0, 1); 4806 4807 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4808 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4809 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4810 4811 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4812 * the failed I/O should not be retried. 4813 */ 4814 g_opts.bdev_retry_count = 4; 4815 4816 bdev_io->internal.in_submit_request = true; 4817 4818 bdev_nvme_submit_request(ch, bdev_io); 4819 4820 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4821 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4822 4823 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4824 SPDK_CU_ASSERT_FATAL(req != NULL); 4825 4826 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4827 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4828 bio->retry_count = 4; 4829 4830 poll_thread_times(0, 1); 4831 4832 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4833 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4834 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4835 4836 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. 
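	 * A negative bdev_retry_count effectively means an unlimited retry
	 * budget, so even bio->retry_count == 4 does not stop the retry here.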
*/ 4837 g_opts.bdev_retry_count = -1; 4838 4839 bdev_io->internal.in_submit_request = true; 4840 4841 bdev_nvme_submit_request(ch, bdev_io); 4842 4843 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4844 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4845 4846 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4847 SPDK_CU_ASSERT_FATAL(req != NULL); 4848 4849 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4850 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4851 bio->retry_count = 4; 4852 4853 poll_thread_times(0, 1); 4854 4855 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4856 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4857 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4858 4859 poll_threads(); 4860 4861 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4862 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4863 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4864 4865 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4866 * the failed I/O should be retried. 4867 */ 4868 g_opts.bdev_retry_count = 4; 4869 4870 bdev_io->internal.in_submit_request = true; 4871 4872 bdev_nvme_submit_request(ch, bdev_io); 4873 4874 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4875 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4876 4877 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4878 SPDK_CU_ASSERT_FATAL(req != NULL); 4879 4880 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4881 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4882 bio->retry_count = 3; 4883 4884 poll_thread_times(0, 1); 4885 4886 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4887 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4888 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4889 4890 poll_threads(); 4891 4892 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4893 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4894 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4895 4896 free(bdev_io); 4897 4898 spdk_put_io_channel(ch); 4899 4900 poll_threads(); 4901 4902 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4903 CU_ASSERT(rc == 0); 4904 4905 poll_threads(); 4906 spdk_delay_us(1000); 4907 poll_threads(); 4908 4909 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4910 4911 g_opts.bdev_retry_count = 0; 4912 } 4913 4914 static void 4915 test_concurrent_read_ana_log_page(void) 4916 { 4917 struct spdk_nvme_transport_id trid = {}; 4918 struct spdk_nvme_ctrlr *ctrlr; 4919 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4920 struct nvme_ctrlr *nvme_ctrlr; 4921 const int STRING_SIZE = 32; 4922 const char *attached_names[STRING_SIZE]; 4923 int rc; 4924 4925 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4926 ut_init_trid(&trid); 4927 4928 set_thread(0); 4929 4930 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4931 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4932 4933 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4934 4935 g_ut_attach_ctrlr_status = 0; 4936 g_ut_attach_bdev_count = 1; 4937 4938 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4939 attach_ctrlr_done, NULL, &opts, NULL, false); 4940 CU_ASSERT(rc == 0); 4941 4942 spdk_delay_us(1000); 4943 poll_threads(); 4944 4945 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4946 poll_threads(); 4947 4948 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4949 
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A subsequent read request should be rejected. */
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	set_thread(1);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A reset request while reading the ANA log page should not be rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	set_thread(0);

	/* It is possible that the target sent an ANA change for inactive
	 * namespaces.
	 *
	 * Previously, an assert() covered this case because it was considered
	 * unlikely. However, the assert() was hit in a real environment.
	 *
	 * Hence, the assert() was removed and this unit test case added.
	 *
	 * Simulate this case by depopulating the namespaces and then parsing an
	 * ANA log page created while all namespaces were active.
	 * Then, check that parsing the ANA log page completes successfully.
	 */
	nvme_ctrlr_depopulate_namespaces(nvme_ctrlr);

	rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_ana_error(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	struct ut_nvme_req *req;
	uint64_t now;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &opts, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	now = spdk_get_ticks();

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the I/O gets an ANA error, it should be queued, the corresponding
	 * namespace should be frozen, and its ANA state should be updated.
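	 * Here "frozen" means nvme_ns->ana_state_updating is set, which keeps
	 * the namespace out of I/O path selection until the ANA log page read
	 * completes and clears the flag.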
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
	/* The I/O should be retried immediately. */
	CU_ASSERT(bio->retry_ticks == now);
	CU_ASSERT(nvme_ns->ana_state_updating == true);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);

	poll_threads();

	/* The namespace is inaccessible, and hence the I/O should be queued again. */
	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list)));
	/* The I/O should be retried after one second if no I/O path was found
	 * but one may become available.
	 */
	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());

	/* The namespace should be unfrozen once its ANA state update completes. */
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ns->ana_state_updating == false);
	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	/* Retrying the queued I/O should then succeed. */
	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_check_io_error_resiliency_params(void)
{
	/* The 1st parameter is ctrlr_loss_timeout_sec, the 2nd is
	 * reconnect_delay_sec, and the 3rd is fast_io_fail_timeout_sec.
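	 *
	 * From the cases below, the rules appear to be: ctrlr_loss_timeout_sec
	 * must be -1 (infinite) or non-negative; ctrlr_loss_timeout_sec and
	 * reconnect_delay_sec must be zero or nonzero together;
	 * reconnect_delay_sec must not exceed a non-negative
	 * ctrlr_loss_timeout_sec; and a nonzero fast_io_fail_timeout_sec must
	 * lie between reconnect_delay_sec and ctrlr_loss_timeout_sec.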
5175 */ 5176 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5177 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5178 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5179 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5180 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5181 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5182 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5183 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5184 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5185 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5186 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5187 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5188 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5189 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5190 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5191 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5192 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5193 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5194 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5195 } 5196 5197 static void 5198 test_retry_io_if_ctrlr_is_resetting(void) 5199 { 5200 struct nvme_path_id path = {}; 5201 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5202 struct spdk_nvme_ctrlr *ctrlr; 5203 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5204 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5205 struct nvme_ctrlr *nvme_ctrlr; 5206 const int STRING_SIZE = 32; 5207 const char *attached_names[STRING_SIZE]; 5208 struct nvme_bdev *bdev; 5209 struct nvme_ns *nvme_ns; 5210 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5211 struct spdk_io_channel *ch; 5212 struct nvme_bdev_channel *nbdev_ch; 5213 struct nvme_io_path *io_path; 5214 struct nvme_qpair *nvme_qpair; 5215 int rc; 5216 5217 g_opts.bdev_retry_count = 1; 5218 5219 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5220 ut_init_trid(&path.trid); 5221 5222 set_thread(0); 5223 5224 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5225 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5226 5227 g_ut_attach_ctrlr_status = 0; 5228 g_ut_attach_bdev_count = 1; 5229 5230 opts.ctrlr_loss_timeout_sec = -1; 5231 opts.reconnect_delay_sec = 1; 5232 5233 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5234 attach_ctrlr_done, NULL, &dopts, &opts, false); 5235 CU_ASSERT(rc == 0); 5236 5237 spdk_delay_us(1000); 5238 poll_threads(); 5239 5240 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5241 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5242 5243 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5244 CU_ASSERT(nvme_ctrlr != NULL); 5245 5246 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5247 CU_ASSERT(bdev != NULL); 5248 5249 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5250 CU_ASSERT(nvme_ns != NULL); 5251 5252 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5253 ut_bdev_io_set_buf(bdev_io1); 5254 5255 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5256 ut_bdev_io_set_buf(bdev_io2); 5257 5258 ch = 
spdk_get_io_channel(bdev); 5259 SPDK_CU_ASSERT_FATAL(ch != NULL); 5260 5261 nbdev_ch = spdk_io_channel_get_ctx(ch); 5262 5263 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5264 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5265 5266 nvme_qpair = io_path->qpair; 5267 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5268 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5269 5270 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5271 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5272 5273 /* If qpair is connected, I/O should succeed. */ 5274 bdev_io1->internal.in_submit_request = true; 5275 5276 bdev_nvme_submit_request(ch, bdev_io1); 5277 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5278 5279 poll_threads(); 5280 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5281 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5282 5283 /* If qpair is disconnected, it is freed and then reconnected via resetting 5284 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5285 * while resetting the nvme_ctrlr. 5286 */ 5287 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5288 ctrlr->is_failed = true; 5289 5290 poll_thread_times(0, 5); 5291 5292 CU_ASSERT(nvme_qpair->qpair == NULL); 5293 CU_ASSERT(nvme_ctrlr->resetting == true); 5294 CU_ASSERT(ctrlr->is_failed == false); 5295 5296 bdev_io1->internal.in_submit_request = true; 5297 5298 bdev_nvme_submit_request(ch, bdev_io1); 5299 5300 spdk_delay_us(1); 5301 5302 bdev_io2->internal.in_submit_request = true; 5303 5304 bdev_nvme_submit_request(ch, bdev_io2); 5305 5306 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5307 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5308 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5309 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx( 5310 TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx, 5311 retry_link))); 5312 5313 poll_threads(); 5314 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5315 poll_threads(); 5316 5317 CU_ASSERT(nvme_qpair->qpair != NULL); 5318 CU_ASSERT(nvme_ctrlr->resetting == false); 5319 5320 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5321 5322 poll_thread_times(0, 1); 5323 5324 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5325 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5326 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5327 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5328 5329 poll_threads(); 5330 5331 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5332 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5333 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5334 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5335 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5336 5337 spdk_delay_us(1); 5338 5339 poll_thread_times(0, 1); 5340 5341 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5342 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5343 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5344 5345 poll_threads(); 5346 5347 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5348 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5349 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5350 5351 free(bdev_io1); 5352 free(bdev_io2); 5353 5354 spdk_put_io_channel(ch); 5355 5356 poll_threads(); 5357 5358 rc = bdev_nvme_delete("nvme0", 
&g_any_path, NULL, NULL); 5359 CU_ASSERT(rc == 0); 5360 5361 poll_threads(); 5362 spdk_delay_us(1000); 5363 poll_threads(); 5364 5365 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5366 5367 g_opts.bdev_retry_count = 0; 5368 } 5369 5370 static void 5371 test_reconnect_ctrlr(void) 5372 { 5373 struct spdk_nvme_transport_id trid = {}; 5374 struct spdk_nvme_ctrlr ctrlr = {}; 5375 struct nvme_ctrlr *nvme_ctrlr; 5376 struct spdk_io_channel *ch1, *ch2; 5377 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5378 int rc; 5379 5380 ut_init_trid(&trid); 5381 TAILQ_INIT(&ctrlr.active_io_qpairs); 5382 5383 set_thread(0); 5384 5385 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5386 CU_ASSERT(rc == 0); 5387 5388 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5389 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5390 5391 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5392 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5393 5394 ch1 = spdk_get_io_channel(nvme_ctrlr); 5395 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5396 5397 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5398 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5399 5400 set_thread(1); 5401 5402 ch2 = spdk_get_io_channel(nvme_ctrlr); 5403 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5404 5405 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5406 5407 /* Reset starts from thread 1. */ 5408 set_thread(1); 5409 5410 /* The reset should fail and a reconnect timer should be registered. */ 5411 ctrlr.fail_reset = true; 5412 ctrlr.is_failed = true; 5413 5414 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5415 CU_ASSERT(rc == 0); 5416 CU_ASSERT(nvme_ctrlr->resetting == true); 5417 CU_ASSERT(ctrlr.is_failed == true); 5418 5419 poll_threads(); 5420 5421 CU_ASSERT(nvme_ctrlr->resetting == false); 5422 CU_ASSERT(ctrlr.is_failed == false); 5423 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5424 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5425 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5426 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5427 5428 /* A new reset starts from thread 1. */ 5429 set_thread(1); 5430 5431 /* The reset should cancel the reconnect timer and should start from reconnection. 5432 * Then, the reset should fail and a reconnect timer should be registered again. 5433 */ 5434 ctrlr.fail_reset = true; 5435 ctrlr.is_failed = true; 5436 5437 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5438 CU_ASSERT(rc == 0); 5439 CU_ASSERT(nvme_ctrlr->resetting == true); 5440 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5441 CU_ASSERT(ctrlr.is_failed == true); 5442 5443 poll_threads(); 5444 5445 CU_ASSERT(nvme_ctrlr->resetting == false); 5446 CU_ASSERT(ctrlr.is_failed == false); 5447 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5448 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5449 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5450 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5451 5452 /* Then a reconnect retry should succeed. */ 5453 ctrlr.fail_reset = false; 5454 5455 spdk_delay_us(SPDK_SEC_TO_USEC); 5456 poll_thread_times(0, 1); 5457 5458 CU_ASSERT(nvme_ctrlr->resetting == true); 5459 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5460 5461 poll_threads(); 5462 5463 CU_ASSERT(nvme_ctrlr->resetting == false); 5464 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5465 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5466 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5467 5468 /* The reset should fail and a reconnect timer should be registered. 
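*/

/*
 * Aside: a minimal sketch of the behavior exercised below. It is kept in a
 * comment so it does not affect the test, and the callback name is
 * hypothetical; the fields and SPDK_POLLER_REGISTER() are the ones this file
 * already uses:
 *
 *	if (reset_failed && nvme_ctrlr->opts.reconnect_delay_sec != 0) {
 *		nvme_ctrlr->reconnect_is_delayed = true;
 *		nvme_ctrlr->reconnect_delay_timer =
 *			SPDK_POLLER_REGISTER(reconnect_delay_timer_expired,
 *					     nvme_ctrlr,
 *					     nvme_ctrlr->opts.reconnect_delay_sec *
 *					     SPDK_SEC_TO_USEC);
 *	}
 *
 * The assertions below check exactly these side effects.
 */

/* The failed reset, step by step: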
*/ 5469 ctrlr.fail_reset = true; 5470 ctrlr.is_failed = true; 5471 5472 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5473 CU_ASSERT(rc == 0); 5474 CU_ASSERT(nvme_ctrlr->resetting == true); 5475 CU_ASSERT(ctrlr.is_failed == true); 5476 5477 poll_threads(); 5478 5479 CU_ASSERT(nvme_ctrlr->resetting == false); 5480 CU_ASSERT(ctrlr.is_failed == false); 5481 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5482 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5483 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5484 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5485 5486 /* Then a reconnect retry should still fail. */ 5487 spdk_delay_us(SPDK_SEC_TO_USEC); 5488 poll_thread_times(0, 1); 5489 5490 CU_ASSERT(nvme_ctrlr->resetting == true); 5491 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5492 5493 poll_threads(); 5494 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5495 poll_threads(); 5496 5497 CU_ASSERT(nvme_ctrlr->resetting == false); 5498 CU_ASSERT(ctrlr.is_failed == false); 5499 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5500 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5501 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5502 5503 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5504 spdk_delay_us(SPDK_SEC_TO_USEC); 5505 poll_threads(); 5506 5507 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5508 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5509 CU_ASSERT(nvme_ctrlr->destruct == true); 5510 5511 spdk_put_io_channel(ch2); 5512 5513 set_thread(0); 5514 5515 spdk_put_io_channel(ch1); 5516 5517 poll_threads(); 5518 spdk_delay_us(1000); 5519 poll_threads(); 5520 5521 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5522 } 5523 5524 static struct nvme_path_id * 5525 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5526 const struct spdk_nvme_transport_id *trid) 5527 { 5528 struct nvme_path_id *p; 5529 5530 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5531 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5532 break; 5533 } 5534 } 5535 5536 return p; 5537 } 5538 5539 static void 5540 test_retry_failover_ctrlr(void) 5541 { 5542 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5543 struct spdk_nvme_ctrlr ctrlr = {}; 5544 struct nvme_ctrlr *nvme_ctrlr = NULL; 5545 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5546 struct spdk_io_channel *ch; 5547 struct nvme_ctrlr_channel *ctrlr_ch; 5548 int rc; 5549 5550 ut_init_trid(&trid1); 5551 ut_init_trid2(&trid2); 5552 ut_init_trid3(&trid3); 5553 TAILQ_INIT(&ctrlr.active_io_qpairs); 5554 5555 set_thread(0); 5556 5557 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5558 CU_ASSERT(rc == 0); 5559 5560 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5561 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5562 5563 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5564 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5565 5566 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5567 CU_ASSERT(rc == 0); 5568 5569 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5570 CU_ASSERT(rc == 0); 5571 5572 ch = spdk_get_io_channel(nvme_ctrlr); 5573 SPDK_CU_ASSERT_FATAL(ch != NULL); 5574 5575 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5576 5577 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5578 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5579 CU_ASSERT(path_id1->last_failed_tsc == 0); 5580 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5581 5582 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to 
trid2. */ 5583 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5584 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5585 5586 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5587 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5588 5589 /* It is expected that connecting to all of trid1, trid2, and trid3 fails, 5590 * and a reconnect timer is started. */ 5591 ctrlr.fail_reset = true; 5592 ctrlr.is_failed = true; 5593 5594 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5595 CU_ASSERT(rc == 0); 5596 5597 poll_threads(); 5598 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5599 poll_threads(); 5600 5601 CU_ASSERT(nvme_ctrlr->resetting == false); 5602 CU_ASSERT(ctrlr.is_failed == false); 5603 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5604 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5605 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5606 CU_ASSERT(path_id1->last_failed_tsc != 0); 5607 5608 CU_ASSERT(path_id2->last_failed_tsc != 0); 5609 CU_ASSERT(path_id3->last_failed_tsc != 0); 5610 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5611 5612 /* If trid1 is removed while reconnect is scheduled, path_id is 5613 * switched to trid2 but reset is not started. 5614 */ 5615 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5616 CU_ASSERT(rc == -EALREADY); 5617 5618 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5619 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5620 5621 CU_ASSERT(nvme_ctrlr->resetting == false); 5622 5623 /* If reconnect succeeds, trid2 should be the active path_id */ 5624 ctrlr.fail_reset = false; 5625 5626 spdk_delay_us(SPDK_SEC_TO_USEC); 5627 poll_thread_times(0, 1); 5628 5629 CU_ASSERT(nvme_ctrlr->resetting == true); 5630 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5631 5632 poll_threads(); 5633 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5634 poll_threads(); 5635 5636 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5637 CU_ASSERT(path_id2->last_failed_tsc == 0); 5638 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5639 CU_ASSERT(nvme_ctrlr->resetting == false); 5640 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5641 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5642 5643 spdk_put_io_channel(ch); 5644 5645 poll_threads(); 5646 5647 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5648 CU_ASSERT(rc == 0); 5649 5650 poll_threads(); 5651 spdk_delay_us(1000); 5652 poll_threads(); 5653 5654 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5655 } 5656 5657 static void 5658 test_fail_path(void) 5659 { 5660 struct nvme_path_id path = {}; 5661 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5662 struct spdk_nvme_ctrlr *ctrlr; 5663 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5664 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5665 struct nvme_ctrlr *nvme_ctrlr; 5666 const int STRING_SIZE = 32; 5667 const char *attached_names[STRING_SIZE]; 5668 struct nvme_bdev *bdev; 5669 struct nvme_ns *nvme_ns; 5670 struct spdk_bdev_io *bdev_io; 5671 struct spdk_io_channel *ch; 5672 struct nvme_bdev_channel *nbdev_ch; 5673 struct nvme_io_path *io_path; 5674 struct nvme_ctrlr_channel *ctrlr_ch; 5675 int rc; 5676 5677 /* The test scenario is the following. 5678 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5679 * - Resetting a ctrlr fails and reconnecting the ctrlr is repeated. 5680 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5681 * - The I/O waits until the ctrlr is recovered but fast_io_fail_timeout_sec 5682 * comes first. 
The queued I/O is failed. 5683 * - After fast_io_fail_timeout_sec, any I/O is failed immediately. 5684 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5685 */ 5686 5687 g_opts.bdev_retry_count = 1; 5688 5689 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5690 ut_init_trid(&path.trid); 5691 5692 set_thread(0); 5693 5694 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5695 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5696 5697 g_ut_attach_ctrlr_status = 0; 5698 g_ut_attach_bdev_count = 1; 5699 5700 opts.ctrlr_loss_timeout_sec = 4; 5701 opts.reconnect_delay_sec = 1; 5702 opts.fast_io_fail_timeout_sec = 2; 5703 5704 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5705 attach_ctrlr_done, NULL, &dopts, &opts, false); 5706 CU_ASSERT(rc == 0); 5707 5708 spdk_delay_us(1000); 5709 poll_threads(); 5710 5711 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5712 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5713 5714 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5715 CU_ASSERT(nvme_ctrlr != NULL); 5716 5717 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5718 CU_ASSERT(bdev != NULL); 5719 5720 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5721 CU_ASSERT(nvme_ns != NULL); 5722 5723 ch = spdk_get_io_channel(bdev); 5724 SPDK_CU_ASSERT_FATAL(ch != NULL); 5725 5726 nbdev_ch = spdk_io_channel_get_ctx(ch); 5727 5728 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5729 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5730 5731 ctrlr_ch = io_path->qpair->ctrlr_ch; 5732 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5733 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5734 5735 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5736 ut_bdev_io_set_buf(bdev_io); 5737 5738 5739 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5740 ctrlr->fail_reset = true; 5741 ctrlr->is_failed = true; 5742 5743 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5744 CU_ASSERT(rc == 0); 5745 CU_ASSERT(nvme_ctrlr->resetting == true); 5746 CU_ASSERT(ctrlr->is_failed == true); 5747 5748 poll_threads(); 5749 5750 CU_ASSERT(nvme_ctrlr->resetting == false); 5751 CU_ASSERT(ctrlr->is_failed == false); 5752 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5753 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5754 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5755 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5756 5757 /* I/O should be queued. */ 5758 bdev_io->internal.in_submit_request = true; 5759 5760 bdev_nvme_submit_request(ch, bdev_io); 5761 5762 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5763 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5764 5765 /* After a second, the I/O should still be queued and the ctrlr should 5766 * still be recovering. 5767 */ 5768 spdk_delay_us(SPDK_SEC_TO_USEC); 5769 poll_threads(); 5770 5771 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5772 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5773 5774 CU_ASSERT(nvme_ctrlr->resetting == false); 5775 CU_ASSERT(ctrlr->is_failed == false); 5776 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5777 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5778 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5779 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5780 5781 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5782 5783 /* After two seconds, fast_io_fail_timeout_sec should expire. 
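*/

/*
 * Aside: a rough sketch (illustrative only, not executed) of the check
 * expected to fire here, assuming elapsed time is measured from
 * reset_start_tsc:
 *
 *	uint64_t elapsed_sec = (spdk_get_ticks() - nvme_ctrlr->reset_start_tsc) /
 *			       spdk_get_ticks_hz();
 *
 *	if (elapsed_sec >= nvme_ctrlr->opts.fast_io_fail_timeout_sec) {
 *		nvme_ctrlr->fast_io_fail_timedout = true;
 *	}
 *
 * Once fast_io_fail_timedout is set, queued I/O is completed with failure
 * instead of waiting for the ctrlr to recover.
 */

/* Advance the clock past the timeout: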
*/ 5784 spdk_delay_us(SPDK_SEC_TO_USEC); 5785 poll_threads(); 5786 5787 CU_ASSERT(nvme_ctrlr->resetting == false); 5788 CU_ASSERT(ctrlr->is_failed == false); 5789 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5790 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5791 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5792 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5793 5794 /* Then within a second, pending I/O should be failed. */ 5795 spdk_delay_us(SPDK_SEC_TO_USEC); 5796 poll_threads(); 5797 5798 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5799 poll_threads(); 5800 5801 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5802 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5803 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5804 5805 /* Another I/O submission should be failed immediately. */ 5806 bdev_io->internal.in_submit_request = true; 5807 5808 bdev_nvme_submit_request(ch, bdev_io); 5809 5810 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5811 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5812 5813 /* After four seconds, ctrlr_loss_timeout_sec should expire and ctrlr should 5814 * be deleted. 5815 */ 5816 spdk_delay_us(SPDK_SEC_TO_USEC); 5817 poll_threads(); 5818 5819 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5820 poll_threads(); 5821 5822 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5823 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5824 CU_ASSERT(nvme_ctrlr->destruct == true); 5825 5826 spdk_put_io_channel(ch); 5827 5828 poll_threads(); 5829 spdk_delay_us(1000); 5830 poll_threads(); 5831 5832 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5833 5834 free(bdev_io); 5835 5836 g_opts.bdev_retry_count = 0; 5837 } 5838 5839 static void 5840 test_nvme_ns_cmp(void) 5841 { 5842 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5843 5844 nvme_ns1.id = 0; 5845 nvme_ns2.id = UINT32_MAX; 5846 5847 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5848 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5849 } 5850 5851 static void 5852 test_ana_transition(void) 5853 { 5854 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5855 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5856 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5857 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5858 5859 /* case 1: ANA transition timeout is canceled. */ 5860 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5861 nvme_ns.ana_transition_timedout = true; 5862 5863 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5864 5865 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5866 5867 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5868 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5869 5870 /* case 2: ANATT timer is kept. */ 5871 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5872 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5873 &nvme_ns, 5874 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5875 5876 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5877 5878 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5879 5880 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5881 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5882 5883 /* case 3: ANATT timer is stopped. 
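*/

/*
 * Aside: the expected handling, roughly (illustrative sketch, not executed):
 * leaving SPDK_NVME_ANA_CHANGE_STATE should disarm the ANATT timer that
 * case 2 armed above, e.g.
 *
 *	if (nvme_ns->ana_state != SPDK_NVME_ANA_CHANGE_STATE) {
 *		spdk_poller_unregister(&nvme_ns->anatt_timer);
 *	}
 *
 * spdk_poller_unregister() also NULLs the pointer, which is what the
 * assertion below relies on.
 */

/* (case 3, continued)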
*/ 5884 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5885 5886 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5887 5888 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5889 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5890 5891 /* ANATT timer is started. */ 5892 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5893 5894 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5895 5896 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5897 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5898 5899 /* ANATT timer is expired. */ 5900 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5901 5902 poll_threads(); 5903 5904 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5905 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5906 } 5907 5908 static void 5909 _set_preferred_path_cb(void *cb_arg, int rc) 5910 { 5911 bool *done = cb_arg; 5912 5913 *done = true; 5914 } 5915 5916 static void 5917 test_set_preferred_path(void) 5918 { 5919 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5920 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5921 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 5922 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5923 const int STRING_SIZE = 32; 5924 const char *attached_names[STRING_SIZE]; 5925 struct nvme_bdev *bdev; 5926 struct spdk_io_channel *ch; 5927 struct nvme_bdev_channel *nbdev_ch; 5928 struct nvme_io_path *io_path; 5929 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5930 const struct spdk_nvme_ctrlr_data *cdata; 5931 bool done; 5932 int rc; 5933 5934 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5935 ut_init_trid(&path1.trid); 5936 ut_init_trid2(&path2.trid); 5937 ut_init_trid3(&path3.trid); 5938 g_ut_attach_ctrlr_status = 0; 5939 g_ut_attach_bdev_count = 1; 5940 5941 set_thread(0); 5942 5943 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5944 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5945 5946 ctrlr1->ns[0].uuid = &uuid1; 5947 5948 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5949 attach_ctrlr_done, NULL, &opts, NULL, true); 5950 CU_ASSERT(rc == 0); 5951 5952 spdk_delay_us(1000); 5953 poll_threads(); 5954 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5955 poll_threads(); 5956 5957 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5958 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5959 5960 ctrlr2->ns[0].uuid = &uuid1; 5961 5962 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5963 attach_ctrlr_done, NULL, &opts, NULL, true); 5964 CU_ASSERT(rc == 0); 5965 5966 spdk_delay_us(1000); 5967 poll_threads(); 5968 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5969 poll_threads(); 5970 5971 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5972 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5973 5974 ctrlr3->ns[0].uuid = &uuid1; 5975 5976 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5977 attach_ctrlr_done, NULL, &opts, NULL, true); 5978 CU_ASSERT(rc == 0); 5979 5980 spdk_delay_us(1000); 5981 poll_threads(); 5982 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5983 poll_threads(); 5984 5985 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5986 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5987 5988 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5989 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5990 5991 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. 
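*/

/*
 * Aside: an illustrative sketch (not executed) of why attach order matters.
 * Paths are kept in a per-channel list, and selection walks that list in
 * order, so with all paths optimized the first attached ctrlr wins. The
 * helper name below is approximate:
 *
 *	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
 *		if (nvme_io_path_is_available(io_path)) {
 *			return io_path;
 *		}
 *	}
 *
 * bdev_nvme_set_preferred_path() reorders that list, which is what the rest
 * of this test exercises.
 */

/* Check the default selection first: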
*/ 5992 5993 ch = spdk_get_io_channel(bdev); 5994 SPDK_CU_ASSERT_FATAL(ch != NULL); 5995 nbdev_ch = spdk_io_channel_get_ctx(ch); 5996 5997 io_path = bdev_nvme_find_io_path(nbdev_ch); 5998 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5999 6000 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6001 6002 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 6003 * should return io_path to ctrlr2. 6004 */ 6005 6006 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 6007 done = false; 6008 6009 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6010 6011 poll_threads(); 6012 CU_ASSERT(done == true); 6013 6014 io_path = bdev_nvme_find_io_path(nbdev_ch); 6015 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6016 6017 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6018 6019 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 6020 * acquired, find_io_path() should return io_path to ctrlr3. 6021 */ 6022 6023 spdk_put_io_channel(ch); 6024 6025 poll_threads(); 6026 6027 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 6028 done = false; 6029 6030 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6031 6032 poll_threads(); 6033 CU_ASSERT(done == true); 6034 6035 ch = spdk_get_io_channel(bdev); 6036 SPDK_CU_ASSERT_FATAL(ch != NULL); 6037 nbdev_ch = spdk_io_channel_get_ctx(ch); 6038 6039 io_path = bdev_nvme_find_io_path(nbdev_ch); 6040 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6041 6042 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 6043 6044 spdk_put_io_channel(ch); 6045 6046 poll_threads(); 6047 6048 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6049 CU_ASSERT(rc == 0); 6050 6051 poll_threads(); 6052 spdk_delay_us(1000); 6053 poll_threads(); 6054 6055 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6056 } 6057 6058 static void 6059 test_find_next_io_path(void) 6060 { 6061 struct nvme_bdev_channel nbdev_ch = { 6062 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6063 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6064 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 6065 }; 6066 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6067 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6068 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6069 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6070 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6071 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6072 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6073 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6074 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6075 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6076 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6077 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6078 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6079 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6080 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6081 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6082 6083 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6084 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6085 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 
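/*
 * Aside: with BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, selection roughly rotates
 * through the available paths, moving on only after rr_min_io I/Os have been
 * issued to the current one (illustrative sketch, not executed; field names
 * are the ones this test uses):
 *
 *	if (++nbdev_ch->rr_counter >= nbdev_ch->rr_min_io) {
 *		nbdev_ch->rr_counter = 0;
 *		nbdev_ch->current_io_path = NULL;  (forces picking the next path)
 *	}
 *
 * The rr_min_io cases near the end of this test rely on this rotation.
 */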
6086 6087 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6088 * is covered in test_find_io_path. 6089 */ 6090 6091 nbdev_ch.current_io_path = &io_path2; 6092 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6093 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6094 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6095 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6096 6097 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6098 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6099 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6100 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6101 6102 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6103 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6104 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6105 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6106 6107 nbdev_ch.current_io_path = &io_path3; 6108 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6109 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6110 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6111 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6112 6113 /* Test if next io_path is selected according to rr_min_io */ 6114 6115 nbdev_ch.current_io_path = NULL; 6116 nbdev_ch.rr_min_io = 2; 6117 nbdev_ch.rr_counter = 0; 6118 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6119 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6120 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6121 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6122 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6123 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6124 6125 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6126 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6127 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6128 } 6129 6130 static void 6131 test_find_io_path_min_qd(void) 6132 { 6133 struct nvme_bdev_channel nbdev_ch = { 6134 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6135 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6136 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6137 }; 6138 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6139 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6140 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6141 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6142 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6143 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6144 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6145 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6146 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6147 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6148 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6149 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6150 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6151 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6152 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6153 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6154 6155 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6156 
STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6157 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6158 6159 /* Test if the path with the fewest outstanding requests or the ANA 6160 * optimized state is prioritized when using the least queue depth selector. 6161 */ 6162 qpair1.num_outstanding_reqs = 2; 6163 qpair2.num_outstanding_reqs = 1; 6164 qpair3.num_outstanding_reqs = 0; 6165 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6166 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6167 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6168 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6169 6170 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6171 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6172 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6173 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6174 6175 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6176 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6177 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6178 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6179 6180 qpair2.num_outstanding_reqs = 4; 6181 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6182 } 6183 6184 static void 6185 test_disable_auto_failback(void) 6186 { 6187 struct nvme_path_id path1 = {}, path2 = {}; 6188 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 6189 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6190 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6191 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6192 struct nvme_ctrlr *nvme_ctrlr1; 6193 const int STRING_SIZE = 32; 6194 const char *attached_names[STRING_SIZE]; 6195 struct nvme_bdev *bdev; 6196 struct spdk_io_channel *ch; 6197 struct nvme_bdev_channel *nbdev_ch; 6198 struct nvme_io_path *io_path; 6199 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6200 const struct spdk_nvme_ctrlr_data *cdata; 6201 bool done; 6202 int rc; 6203 6204 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6205 ut_init_trid(&path1.trid); 6206 ut_init_trid2(&path2.trid); 6207 g_ut_attach_ctrlr_status = 0; 6208 g_ut_attach_bdev_count = 1; 6209 6210 g_opts.disable_auto_failback = true; 6211 6212 opts.ctrlr_loss_timeout_sec = -1; 6213 opts.reconnect_delay_sec = 1; 6214 6215 set_thread(0); 6216 6217 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6218 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6219 6220 ctrlr1->ns[0].uuid = &uuid1; 6221 6222 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6223 attach_ctrlr_done, NULL, &dopts, &opts, true); 6224 CU_ASSERT(rc == 0); 6225 6226 spdk_delay_us(1000); 6227 poll_threads(); 6228 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6229 poll_threads(); 6230 6231 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6232 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6233 6234 ctrlr2->ns[0].uuid = &uuid1; 6235 6236 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6237 attach_ctrlr_done, NULL, &dopts, &opts, true); 6238 CU_ASSERT(rc == 0); 6239 6240 spdk_delay_us(1000); 6241 poll_threads(); 6242 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6243 poll_threads(); 6244 6245 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6246 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6247 6248 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6249 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6250 6251 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn); 6252 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6253 6254 /* ctrlr1 was added first. 
Hence io_path to ctrlr1 should be preferred. */ 6255 6256 ch = spdk_get_io_channel(bdev); 6257 SPDK_CU_ASSERT_FATAL(ch != NULL); 6258 nbdev_ch = spdk_io_channel_get_ctx(ch); 6259 6260 io_path = bdev_nvme_find_io_path(nbdev_ch); 6261 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6262 6263 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6264 6265 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6266 ctrlr1->fail_reset = true; 6267 ctrlr1->is_failed = true; 6268 6269 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6270 6271 poll_threads(); 6272 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6273 poll_threads(); 6274 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6275 poll_threads(); 6276 6277 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6278 6279 io_path = bdev_nvme_find_io_path(nbdev_ch); 6280 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6281 6282 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6283 6284 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6285 * Hence, io_path to ctrlr2 should still be used. 6286 */ 6287 ctrlr1->fail_reset = false; 6288 6289 spdk_delay_us(SPDK_SEC_TO_USEC); 6290 poll_threads(); 6291 6292 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6293 6294 io_path = bdev_nvme_find_io_path(nbdev_ch); 6295 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6296 6297 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6298 6299 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6300 * be used again. 6301 */ 6302 6303 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6304 done = false; 6305 6306 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6307 6308 poll_threads(); 6309 CU_ASSERT(done == true); 6310 6311 io_path = bdev_nvme_find_io_path(nbdev_ch); 6312 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6313 6314 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6315 6316 spdk_put_io_channel(ch); 6317 6318 poll_threads(); 6319 6320 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6321 CU_ASSERT(rc == 0); 6322 6323 poll_threads(); 6324 spdk_delay_us(1000); 6325 poll_threads(); 6326 6327 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6328 6329 g_opts.disable_auto_failback = false; 6330 } 6331 6332 static void 6333 ut_set_multipath_policy_done(void *cb_arg, int rc) 6334 { 6335 int *done = cb_arg; 6336 6337 SPDK_CU_ASSERT_FATAL(done != NULL); 6338 *done = rc; 6339 } 6340 6341 static void 6342 test_set_multipath_policy(void) 6343 { 6344 struct nvme_path_id path1 = {}, path2 = {}; 6345 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 6346 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6347 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6348 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6349 const int STRING_SIZE = 32; 6350 const char *attached_names[STRING_SIZE]; 6351 struct nvme_bdev *bdev; 6352 struct spdk_io_channel *ch; 6353 struct nvme_bdev_channel *nbdev_ch; 6354 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6355 int done; 6356 int rc; 6357 6358 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6359 ut_init_trid(&path1.trid); 6360 ut_init_trid2(&path2.trid); 6361 g_ut_attach_ctrlr_status = 0; 6362 g_ut_attach_bdev_count = 1; 6363 6364 g_opts.disable_auto_failback = true; 6365 6366 opts.ctrlr_loss_timeout_sec = -1; 6367 opts.reconnect_delay_sec = 1; 6368 6369 set_thread(0); 6370 6371 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6372 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6373 6374 ctrlr1->ns[0].uuid = &uuid1; 6375 6376 rc = spdk_bdev_nvme_create(&path1.trid, 
"nvme0", attached_names, STRING_SIZE, 6377 attach_ctrlr_done, NULL, &dopts, &opts, true); 6378 CU_ASSERT(rc == 0); 6379 6380 spdk_delay_us(1000); 6381 poll_threads(); 6382 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6383 poll_threads(); 6384 6385 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6386 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6387 6388 ctrlr2->ns[0].uuid = &uuid1; 6389 6390 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6391 attach_ctrlr_done, NULL, &dopts, &opts, true); 6392 CU_ASSERT(rc == 0); 6393 6394 spdk_delay_us(1000); 6395 poll_threads(); 6396 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6397 poll_threads(); 6398 6399 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6400 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6401 6402 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6403 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6404 6405 /* If multipath policy is updated before getting any I/O channel, 6406 * an new I/O channel should have the update. 6407 */ 6408 done = -1; 6409 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6410 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6411 ut_set_multipath_policy_done, &done); 6412 poll_threads(); 6413 CU_ASSERT(done == 0); 6414 6415 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6416 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6417 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6418 6419 ch = spdk_get_io_channel(bdev); 6420 SPDK_CU_ASSERT_FATAL(ch != NULL); 6421 nbdev_ch = spdk_io_channel_get_ctx(ch); 6422 6423 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6424 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6425 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6426 6427 /* If multipath policy is updated while a I/O channel is active, 6428 * the update should be applied to the I/O channel immediately. 6429 */ 6430 done = -1; 6431 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6432 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6433 ut_set_multipath_policy_done, &done); 6434 poll_threads(); 6435 CU_ASSERT(done == 0); 6436 6437 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6438 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6439 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6440 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6441 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6442 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6443 6444 spdk_put_io_channel(ch); 6445 6446 poll_threads(); 6447 6448 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6449 CU_ASSERT(rc == 0); 6450 6451 poll_threads(); 6452 spdk_delay_us(1000); 6453 poll_threads(); 6454 6455 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6456 } 6457 6458 static void 6459 test_uuid_generation(void) 6460 { 6461 uint32_t nsid1 = 1, nsid2 = 2; 6462 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6463 char sn3[21] = " "; 6464 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6465 struct spdk_uuid uuid1, uuid2; 6466 int rc; 6467 6468 /* Test case 1: 6469 * Serial numbers are the same, nsids are different. 6470 * Compare two generated UUID - they should be different. 
*/ 6471 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6472 CU_ASSERT(rc == 0); 6473 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6474 CU_ASSERT(rc == 0); 6475 6476 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6477 6478 /* Test case 2: 6479 * Serial numbers differ only by one character, nsids are the same. 6480 * Compare two generated UUIDs - they should be different. */ 6481 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6482 CU_ASSERT(rc == 0); 6483 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6484 CU_ASSERT(rc == 0); 6485 6486 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6487 6488 /* Test case 3: 6489 * Serial number consists only of space characters. 6490 * Validate the generated UUID. */ 6491 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6492 CU_ASSERT(rc == 0); 6493 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6494 6495 } 6496 6497 static void 6498 test_retry_io_to_same_path(void) 6499 { 6500 struct nvme_path_id path1 = {}, path2 = {}; 6501 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6502 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 6503 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6504 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6505 const int STRING_SIZE = 32; 6506 const char *attached_names[STRING_SIZE]; 6507 struct nvme_bdev *bdev; 6508 struct spdk_bdev_io *bdev_io; 6509 struct nvme_bdev_io *bio; 6510 struct spdk_io_channel *ch; 6511 struct nvme_bdev_channel *nbdev_ch; 6512 struct nvme_io_path *io_path1, *io_path2; 6513 struct ut_nvme_req *req; 6514 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6515 int done; 6516 int rc; 6517 6518 g_opts.nvme_ioq_poll_period_us = 1; 6519 6520 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6521 ut_init_trid(&path1.trid); 6522 ut_init_trid2(&path2.trid); 6523 g_ut_attach_ctrlr_status = 0; 6524 g_ut_attach_bdev_count = 1; 6525 6526 set_thread(0); 6527 6528 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6529 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6530 6531 ctrlr1->ns[0].uuid = &uuid1; 6532 6533 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6534 attach_ctrlr_done, NULL, &opts, NULL, true); 6535 CU_ASSERT(rc == 0); 6536 6537 spdk_delay_us(1000); 6538 poll_threads(); 6539 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6540 poll_threads(); 6541 6542 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6543 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6544 6545 ctrlr2->ns[0].uuid = &uuid1; 6546 6547 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6548 attach_ctrlr_done, NULL, &opts, NULL, true); 6549 CU_ASSERT(rc == 0); 6550 6551 spdk_delay_us(1000); 6552 poll_threads(); 6553 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6554 poll_threads(); 6555 6556 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6557 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6558 6559 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 6560 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6561 6562 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 6563 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6564 6565 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6566 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6567 6568 done = -1; 6569 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6570 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6571 poll_threads(); 6572 CU_ASSERT(done == 0); 6573 6574 CU_ASSERT(bdev->mp_policy == 
BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6575 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6576 CU_ASSERT(bdev->rr_min_io == 1); 6577 6578 ch = spdk_get_io_channel(bdev); 6579 SPDK_CU_ASSERT_FATAL(ch != NULL); 6580 nbdev_ch = spdk_io_channel_get_ctx(ch); 6581 6582 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6583 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6584 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6585 6586 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6587 ut_bdev_io_set_buf(bdev_io); 6588 6589 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6590 6591 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6592 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6593 6594 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6595 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6596 6597 /* The 1st I/O should be submitted to io_path1. */ 6598 bdev_io->internal.in_submit_request = true; 6599 6600 bdev_nvme_submit_request(ch, bdev_io); 6601 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6602 CU_ASSERT(bio->io_path == io_path1); 6603 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6604 6605 spdk_delay_us(1); 6606 6607 poll_threads(); 6608 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6609 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6610 6611 /* The 2nd I/O should be submitted to io_path2 because the path selection 6612 * policy is round-robin. 6613 */ 6614 bdev_io->internal.in_submit_request = true; 6615 6616 bdev_nvme_submit_request(ch, bdev_io); 6617 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6618 CU_ASSERT(bio->io_path == io_path2); 6619 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6620 6621 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6622 SPDK_CU_ASSERT_FATAL(req != NULL); 6623 6624 /* Set retry count to non-zero. */ 6625 g_opts.bdev_retry_count = 2; 6626 6627 /* Inject an I/O error. */ 6628 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6629 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6630 6631 /* The 2nd I/O should be queued to nbdev_ch. */ 6632 spdk_delay_us(1); 6633 poll_thread_times(0, 1); 6634 6635 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6636 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6637 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6638 6639 /* The 2nd I/O should keep caching io_path2. */ 6640 CU_ASSERT(bio->io_path == io_path2); 6641 6642 /* The 2nd I/O should be submitted to io_path2 again. */ 6643 poll_thread_times(0, 1); 6644 6645 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6646 CU_ASSERT(bio->io_path == io_path2); 6647 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6648 6649 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6650 SPDK_CU_ASSERT_FATAL(req != NULL); 6651 6652 /* Inject an I/O error again. */ 6653 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6654 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6655 req->cpl.status.crd = 1; 6656 6657 ctrlr2->cdata.crdt[1] = 1; 6658 6659 /* The 2nd I/O should be queued to nbdev_ch. */ 6660 spdk_delay_us(1); 6661 poll_thread_times(0, 1); 6662 6663 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6664 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6665 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6666 6667 /* The 2nd I/O should keep caching io_path2. 
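*/

/*
 * Aside: this second injection also sets CRD (Command Retry Delay). Per the
 * NVMe spec, CRDT values are in units of 100 milliseconds, so the retry is
 * expected to be deferred by roughly (illustrative sketch, not executed):
 *
 *	delay_ms = ctrlr->cdata.crdt[cpl->status.crd] * 100;
 *
 * With crdt[1] == 1 as set above, that is 100 ms, which matches the
 * spdk_delay_us(100000) later in this test.
 */

/* (checked below)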
*/ 6668 CU_ASSERT(bio->io_path == io_path2); 6669 6670 /* Detach ctrlr2 dynamically. */ 6671 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6672 CU_ASSERT(rc == 0); 6673 6674 spdk_delay_us(1000); 6675 poll_threads(); 6676 spdk_delay_us(1000); 6677 poll_threads(); 6678 spdk_delay_us(1000); 6679 poll_threads(); 6680 spdk_delay_us(1000); 6681 poll_threads(); 6682 6683 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 6684 6685 poll_threads(); 6686 spdk_delay_us(100000); 6687 poll_threads(); 6688 spdk_delay_us(1); 6689 poll_threads(); 6690 6691 /* The 2nd I/O should succeed through io_path1. */ 6692 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6693 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6694 CU_ASSERT(bio->io_path == io_path1); 6695 6696 free(bdev_io); 6697 6698 spdk_put_io_channel(ch); 6699 6700 poll_threads(); 6701 spdk_delay_us(1); 6702 poll_threads(); 6703 6704 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6705 CU_ASSERT(rc == 0); 6706 6707 poll_threads(); 6708 spdk_delay_us(1000); 6709 poll_threads(); 6710 6711 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6712 6713 g_opts.nvme_ioq_poll_period_us = 0; 6714 g_opts.bdev_retry_count = 0; 6715 } 6716 6717 /* This case verifies a fix for a complex race condition in which 6718 * failover is lost if the fabric connect command times out while 6719 * the controller is being reset. 6720 */ 6721 static void 6722 test_race_between_reset_and_disconnected(void) 6723 { 6724 struct spdk_nvme_transport_id trid = {}; 6725 struct spdk_nvme_ctrlr ctrlr = {}; 6726 struct nvme_ctrlr *nvme_ctrlr = NULL; 6727 struct nvme_path_id *curr_trid; 6728 struct spdk_io_channel *ch1, *ch2; 6729 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6730 int rc; 6731 6732 ut_init_trid(&trid); 6733 TAILQ_INIT(&ctrlr.active_io_qpairs); 6734 6735 set_thread(0); 6736 6737 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6738 CU_ASSERT(rc == 0); 6739 6740 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6741 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6742 6743 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6744 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6745 6746 ch1 = spdk_get_io_channel(nvme_ctrlr); 6747 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6748 6749 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6750 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6751 6752 set_thread(1); 6753 6754 ch2 = spdk_get_io_channel(nvme_ctrlr); 6755 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6756 6757 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6758 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6759 6760 /* Reset starts from thread 1. 
*/ 6761 set_thread(1); 6762 6763 nvme_ctrlr->resetting = false; 6764 curr_trid->last_failed_tsc = spdk_get_ticks(); 6765 ctrlr.is_failed = true; 6766 6767 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6768 CU_ASSERT(rc == 0); 6769 CU_ASSERT(nvme_ctrlr->resetting == true); 6770 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6771 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6772 6773 poll_thread_times(0, 3); 6774 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6775 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6776 6777 poll_thread_times(0, 1); 6778 poll_thread_times(1, 1); 6779 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6780 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6781 CU_ASSERT(ctrlr.is_failed == true); 6782 6783 poll_thread_times(1, 1); 6784 poll_thread_times(0, 1); 6785 CU_ASSERT(ctrlr.is_failed == false); 6786 CU_ASSERT(ctrlr.adminq.is_connected == false); 6787 6788 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6789 poll_thread_times(0, 2); 6790 CU_ASSERT(ctrlr.adminq.is_connected == true); 6791 6792 poll_thread_times(0, 1); 6793 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6794 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6795 6796 poll_thread_times(1, 1); 6797 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6798 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6799 CU_ASSERT(nvme_ctrlr->resetting == true); 6800 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6801 6802 poll_thread_times(0, 2); 6803 CU_ASSERT(nvme_ctrlr->resetting == true); 6804 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6805 poll_thread_times(1, 1); 6806 CU_ASSERT(nvme_ctrlr->resetting == true); 6807 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6808 6809 /* Only one poll remains before _bdev_nvme_reset_complete() is executed. 6810 * 6811 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before the fabric 6812 * connect command is executed. If the fabric connect command times out, 6813 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6814 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6815 * 6816 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 
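*/

/*
 * Aside: the deferral being verified, roughly (illustrative sketch, not
 * executed):
 *
 *	if (nvme_ctrlr->resetting) {
 *		nvme_ctrlr->pending_failover = true;
 *		return -EINPROGRESS;  (failover replayed after the reset completes)
 *	}
 *
 * The assertions below check both the return code and the flag.
 */

/* (simulated here)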
6817 */ 6818 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6819 CU_ASSERT(rc == -EINPROGRESS); 6820 CU_ASSERT(nvme_ctrlr->resetting == true); 6821 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6822 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6823 6824 poll_thread_times(0, 1); 6825 6826 CU_ASSERT(nvme_ctrlr->resetting == true); 6827 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6828 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6829 6830 poll_threads(); 6831 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6832 poll_threads(); 6833 6834 CU_ASSERT(nvme_ctrlr->resetting == false); 6835 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6836 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6837 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6838 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6839 6840 spdk_put_io_channel(ch2); 6841 6842 set_thread(0); 6843 6844 spdk_put_io_channel(ch1); 6845 6846 poll_threads(); 6847 6848 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6849 CU_ASSERT(rc == 0); 6850 6851 poll_threads(); 6852 spdk_delay_us(1000); 6853 poll_threads(); 6854 6855 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6856 } 6857 static void 6858 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6859 { 6860 int *_rc = (int *)cb_arg; 6861 6862 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6863 *_rc = rc; 6864 } 6865 6866 static void 6867 test_ctrlr_op_rpc(void) 6868 { 6869 struct spdk_nvme_transport_id trid = {}; 6870 struct spdk_nvme_ctrlr ctrlr = {}; 6871 struct nvme_ctrlr *nvme_ctrlr = NULL; 6872 struct nvme_path_id *curr_trid; 6873 struct spdk_io_channel *ch1, *ch2; 6874 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6875 int ctrlr_op_rc; 6876 int rc; 6877 6878 ut_init_trid(&trid); 6879 TAILQ_INIT(&ctrlr.active_io_qpairs); 6880 6881 set_thread(0); 6882 6883 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6884 CU_ASSERT(rc == 0); 6885 6886 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6887 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6888 6889 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6890 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6891 6892 ch1 = spdk_get_io_channel(nvme_ctrlr); 6893 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6894 6895 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6896 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6897 6898 set_thread(1); 6899 6900 ch2 = spdk_get_io_channel(nvme_ctrlr); 6901 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6902 6903 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6904 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6905 6906 /* Reset starts from thread 1. */ 6907 set_thread(1); 6908 6909 /* Case 1: ctrlr is already being destructed. */ 6910 nvme_ctrlr->destruct = true; 6911 ctrlr_op_rc = 0; 6912 6913 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6914 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6915 6916 poll_threads(); 6917 6918 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6919 6920 /* Case 2: reset is in progress. */ 6921 nvme_ctrlr->destruct = false; 6922 nvme_ctrlr->resetting = true; 6923 ctrlr_op_rc = 0; 6924 6925 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6926 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6927 6928 poll_threads(); 6929 6930 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6931 6932 /* Case 3: reset completes successfully. 
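*/

/*
 * Aside: cases 1, 2, and 4 suggest nvme_ctrlr_op_rpc() guards roughly like
 * this (illustrative sketch, not executed):
 *
 *	if (nvme_ctrlr->destruct) {
 *		cb_fn(cb_arg, -ENXIO);
 *	} else if (nvme_ctrlr->resetting) {
 *		cb_fn(cb_arg, -EBUSY);
 *	} else if (op == NVME_CTRLR_OP_RESET) {
 *		(start the reset; cb_fn() runs on completion)
 *	} else {
 *		cb_fn(cb_arg, -EINVAL);
 *	}
 */

/* (case 3, continued)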
*/ 6933 nvme_ctrlr->resetting = false; 6934 curr_trid->last_failed_tsc = spdk_get_ticks(); 6935 ctrlr.is_failed = true; 6936 ctrlr_op_rc = -1; 6937 6938 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6939 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6940 6941 CU_ASSERT(nvme_ctrlr->resetting == true); 6942 CU_ASSERT(ctrlr_op_rc == -1); 6943 6944 poll_threads(); 6945 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6946 poll_threads(); 6947 6948 CU_ASSERT(nvme_ctrlr->resetting == false); 6949 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6950 CU_ASSERT(ctrlr.is_failed == false); 6951 CU_ASSERT(ctrlr_op_rc == 0); 6952 6953 /* Case 4: invalid operation. */ 6954 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6955 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6956 6957 poll_threads(); 6958 6959 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6960 6961 spdk_put_io_channel(ch2); 6962 6963 set_thread(0); 6964 6965 spdk_put_io_channel(ch1); 6966 6967 poll_threads(); 6968 6969 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6970 CU_ASSERT(rc == 0); 6971 6972 poll_threads(); 6973 spdk_delay_us(1000); 6974 poll_threads(); 6975 6976 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6977 } 6978 6979 static void 6980 test_bdev_ctrlr_op_rpc(void) 6981 { 6982 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6983 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6984 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6985 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6986 struct nvme_path_id *curr_trid1, *curr_trid2; 6987 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6988 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6989 int ctrlr_op_rc; 6990 int rc; 6991 6992 ut_init_trid(&trid1); 6993 ut_init_trid2(&trid2); 6994 TAILQ_INIT(&ctrlr1.active_io_qpairs); 6995 TAILQ_INIT(&ctrlr2.active_io_qpairs); 6996 ctrlr1.cdata.cmic.multi_ctrlr = 1; 6997 ctrlr2.cdata.cmic.multi_ctrlr = 1; 6998 ctrlr1.cdata.cntlid = 1; 6999 ctrlr2.cdata.cntlid = 2; 7000 ctrlr1.adminq.is_connected = true; 7001 ctrlr2.adminq.is_connected = true; 7002 7003 set_thread(0); 7004 7005 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 7006 CU_ASSERT(rc == 0); 7007 7008 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7009 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7010 7011 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN); 7012 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 7013 7014 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 7015 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 7016 7017 ch11 = spdk_get_io_channel(nvme_ctrlr1); 7018 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 7019 7020 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 7021 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7022 7023 set_thread(1); 7024 7025 ch12 = spdk_get_io_channel(nvme_ctrlr1); 7026 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 7027 7028 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 7029 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7030 7031 set_thread(0); 7032 7033 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 7034 CU_ASSERT(rc == 0); 7035 7036 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN); 7037 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 7038 7039 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 7040 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 7041 7042 ch21 = spdk_get_io_channel(nvme_ctrlr2); 7043 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 7044 7045 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 7046 CU_ASSERT(ctrlr_ch21->qpair != NULL); 7047 7048 set_thread(1); 7049 7050 ch22 = spdk_get_io_channel(nvme_ctrlr2); 7051 SPDK_CU_ASSERT_FATAL(ch22 != 
NULL); 7052 7053 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 7054 CU_ASSERT(ctrlr_ch22->qpair != NULL); 7055 7056 /* Reset starts from thread 1. */ 7057 set_thread(1); 7058 7059 nvme_ctrlr1->resetting = false; 7060 nvme_ctrlr2->resetting = false; 7061 curr_trid1->last_failed_tsc = spdk_get_ticks(); 7062 curr_trid2->last_failed_tsc = spdk_get_ticks(); 7063 ctrlr_op_rc = -1; 7064 7065 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 7066 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 7067 7068 CU_ASSERT(nvme_ctrlr1->resetting == true); 7069 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7070 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7071 CU_ASSERT(nvme_ctrlr2->resetting == false); 7072 7073 poll_thread_times(0, 3); 7074 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7075 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7076 7077 poll_thread_times(0, 1); 7078 poll_thread_times(1, 1); 7079 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7080 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7081 7082 poll_thread_times(1, 1); 7083 poll_thread_times(0, 1); 7084 CU_ASSERT(ctrlr1.adminq.is_connected == false); 7085 7086 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7087 poll_thread_times(0, 2); 7088 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7089 7090 poll_thread_times(0, 1); 7091 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7092 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7093 7094 poll_thread_times(1, 1); 7095 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7096 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7097 CU_ASSERT(nvme_ctrlr1->resetting == true); 7098 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7099 7100 poll_thread_times(0, 2); 7101 poll_thread_times(1, 1); 7102 poll_thread_times(0, 1); 7103 poll_thread_times(1, 1); 7104 poll_thread_times(0, 1); 7105 poll_thread_times(1, 1); 7106 poll_thread_times(0, 1); 7107 7108 CU_ASSERT(nvme_ctrlr1->resetting == false); 7109 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7110 CU_ASSERT(nvme_ctrlr2->resetting == true); 7111 7112 poll_threads(); 7113 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7114 poll_threads(); 7115 7116 CU_ASSERT(nvme_ctrlr2->resetting == false); 7117 CU_ASSERT(ctrlr_op_rc == 0); 7118 7119 set_thread(1); 7120 7121 spdk_put_io_channel(ch12); 7122 spdk_put_io_channel(ch22); 7123 7124 set_thread(0); 7125 7126 spdk_put_io_channel(ch11); 7127 spdk_put_io_channel(ch21); 7128 7129 poll_threads(); 7130 7131 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7132 CU_ASSERT(rc == 0); 7133 7134 poll_threads(); 7135 spdk_delay_us(1000); 7136 poll_threads(); 7137 7138 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7139 } 7140 7141 static void 7142 test_disable_enable_ctrlr(void) 7143 { 7144 struct spdk_nvme_transport_id trid = {}; 7145 struct spdk_nvme_ctrlr ctrlr = {}; 7146 struct nvme_ctrlr *nvme_ctrlr = NULL; 7147 struct nvme_path_id *curr_trid; 7148 struct spdk_io_channel *ch1, *ch2; 7149 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7150 int rc; 7151 7152 ut_init_trid(&trid); 7153 TAILQ_INIT(&ctrlr.active_io_qpairs); 7154 ctrlr.adminq.is_connected = true; 7155 7156 set_thread(0); 7157 7158 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7159 CU_ASSERT(rc == 0); 7160 7161 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7162 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7163 7164 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7165 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7166 7167 ch1 = spdk_get_io_channel(nvme_ctrlr); 7168 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7169 7170 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7171 CU_ASSERT(ctrlr_ch1->qpair != 
	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Disable starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already disabled. */
	nvme_ctrlr->disabled = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	/* Case 2: ctrlr is already being destructed. */
	nvme_ctrlr->disabled = false;
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 3: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 4: disable completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.adminq.is_connected == false);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	/* Case 5: enable completes successfully. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* Case 6: ctrlr is already enabled. */
	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == -EALREADY);

	set_thread(0);

	/* Case 7: disable cancels delayed reconnect. */
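	/* A failed reset with reconnect_delay_sec set arms a delayed reconnect timer
	 * instead of retrying immediately; disabling the ctrlr while that timer is
	 * pending should cancel the delayed reconnect.
	 */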
	nvme_ctrlr->opts.reconnect_delay_sec = 10;
	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	rc = bdev_nvme_disable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(nvme_ctrlr->disabled == true);

	rc = bdev_nvme_enable_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->disabled == false);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	set_thread(1);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_delete_done(void *ctx, int rc)
{
	int *delete_done_rc = ctx;
	*delete_done_rc = rc;
}

static void
test_delete_ctrlr_done(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int delete_done_rc = 0xDEADBEEF;
	int rc;

	ut_init_trid(&trid);

	nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc);
	CU_ASSERT(rc == 0);

	for (int i = 0; i < 20; i++) {
		poll_threads();
		if (delete_done_rc == 0) {
			break;
		}
		spdk_delay_us(1000);
	}

	CU_ASSERT(delete_done_rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_ns_remove_during_reset(void)
{
	struct nvme_path_id path = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
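
	/* The attach has completed; the ctrlr should now be registered as "nvme0"
	 * with a single namespace exposed as a single bdev.
	 */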
	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	/* If the ns is removed during a ctrlr reset, nvme_ns and bdev should still
	 * exist, but nvme_ns->ns should be NULL.
	 */

	CU_ASSERT(ctrlr->ns[0].is_active == true);
	ctrlr->ns[0].is_active = false;

	rc = bdev_nvme_reset_ctrlr(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->adminq.is_connected == true);

	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
	CU_ASSERT(nvme_ns->bdev == bdev);
	CU_ASSERT(nvme_ns->ns == NULL);

	/* Then, an async event should fill nvme_ns->ns again. */

	ctrlr->ns[0].is_active = true;

	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_ctrlr, &cpl);

	CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr));
	CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1));
	CU_ASSERT(nvme_ns->bdev == bdev);
	CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_io_path_is_current(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
	struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, },
			nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {};
	struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, };
	struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, };
	struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, };
	struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, };
	struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
	struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };

	/* io_path1 is deleting */
	io_path1.nbdev_ch = NULL;

	CU_ASSERT(nvme_io_path_is_current(&io_path1) == false);
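
	/* Register all three paths on the channel and verify which path is deemed
	 * current under each multipath policy below.
	 */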
	io_path1.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
	io_path2.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
	io_path3.nbdev_ch = &nbdev_ch;
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);

	/* active/active: io_path is current if it is available and ANA optimized. */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/active: io_path is not current if it is disconnected even if it is
	 * ANA optimized.
	 */
	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	/* active/passive: io_path is current if it is available and cached.
	 * (Only the ANA optimized path is cached for active/passive.)
	 */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	nbdev_ch.current_io_path = &io_path2;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/passive: io_path is not current if it is disconnected even if it is cached. */
	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	/* active/active and active/passive: io_path is not current if it is ANA inaccessible. */
	nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;

	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	/* active/active: a non-optimized path is current only if there is no optimized path. */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;

	CU_ASSERT(nvme_io_path_is_current(&io_path2) == true);

	/* active/passive: io_path is current only if it is the first one when there
	 * is no optimized path.
	 */
	nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE;
	nbdev_ch.current_io_path = NULL;

	CU_ASSERT(nvme_io_path_is_current(&io_path1) == true);
	CU_ASSERT(nvme_io_path_is_current(&io_path2) == false);
	CU_ASSERT(nvme_io_path_is_current(&io_path3) == false);
}

static void
test_bdev_reset_abort_io(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_bdev_nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN};
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *read_io, *reset_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	int rc;

	g_opts.bdev_retry_count = -1;

	ut_init_trid(&trid);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(1);

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
				   attach_ctrlr_done, NULL, &dopts, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);
	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	nvme_qpair1 = io_path1->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);
	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	nvme_qpair2 = io_path2->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);

	write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch1);
	ut_bdev_io_set_buf(write_io);
	write_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, bdev, ch1);
	ut_bdev_io_set_buf(read_io);
	read_io->internal.ch = (struct spdk_bdev_channel *)ch1;

	reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);

	/* If a qpair is disconnected, it is freed and then reconnected by resetting
	 * the corresponding nvme_ctrlr. I/O submitted while the nvme_ctrlr is
	 * resetting should be queued.
	 */
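	/* Note: bdev_retry_count was set to -1 above, i.e. unlimited retries, so the
	 * queued write below stays on the retry list until the bdev reset aborts it.
	 */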
	nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;

	poll_thread_times(0, 3);

	CU_ASSERT(nvme_qpair1->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	set_thread(0);

	write_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list)));

	set_thread(1);

	/* Submit a reset request to the bdev while the nvme_ctrlr is resetting.
	 * Further I/O queueing should be disabled and queued I/Os should be aborted.
	 * Verify these behaviors.
	 */
	reset_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, reset_io);

	poll_thread_times(0, 1);
	poll_thread_times(1, 2);

	CU_ASSERT(nbdev_ch1->resetting == true);

	/* qpair1 should still be disconnected. */
	CU_ASSERT(nvme_qpair1->qpair == NULL);

	set_thread(0);

	read_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, read_io);

	CU_ASSERT(nvme_qpair1->qpair == NULL);

	poll_thread_times(0, 1);

	/* The I/O submitted during the bdev reset should fail immediately. */
	CU_ASSERT(read_io->internal.in_submit_request == false);
	CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* The completion of the bdev reset should ensure the queued I/O is aborted. */
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	/* The reset request itself should complete with success. */
	CU_ASSERT(reset_io->internal.in_submit_request == false);
	CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(write_io);
	free(read_io);
	free(reset_io);

	g_opts.bdev_retry_count = 0;
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
	CU_ADD_TEST(suite, test_admin_path);
	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
	CU_ADD_TEST(suite, test_find_io_path);
	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
	CU_ADD_TEST(suite, test_retry_io_count);
	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
	CU_ADD_TEST(suite, test_reconnect_ctrlr);
	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
	CU_ADD_TEST(suite, test_fail_path);
	CU_ADD_TEST(suite, test_nvme_ns_cmp);
	CU_ADD_TEST(suite, test_ana_transition);
	CU_ADD_TEST(suite, test_set_preferred_path);
	CU_ADD_TEST(suite, test_find_next_io_path);
	CU_ADD_TEST(suite, test_find_io_path_min_qd);
	CU_ADD_TEST(suite, test_disable_auto_failback);
	CU_ADD_TEST(suite, test_set_multipath_policy);
	CU_ADD_TEST(suite, test_uuid_generation);
	CU_ADD_TEST(suite, test_retry_io_to_same_path);
	CU_ADD_TEST(suite, test_race_between_reset_and_disconnected);
	CU_ADD_TEST(suite, test_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc);
	CU_ADD_TEST(suite, test_disable_enable_ctrlr);
	CU_ADD_TEST(suite, test_delete_ctrlr_done);
	CU_ADD_TEST(suite, test_ns_remove_during_reset);
	CU_ADD_TEST(suite, test_io_path_is_current);
	CU_ADD_TEST(suite, test_bdev_reset_abort_io);

	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
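
	/* Tear down on thread 0, matching the setup sequence above. */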
	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	CU_cleanup_registry();

	return num_failures;
}