1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_internal/cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 12 #include "common/lib/ut_multithread.c" 13 14 #include "bdev/nvme/bdev_nvme.c" 15 16 #include "unit/lib/json_mock.c" 17 18 #include "bdev/nvme/bdev_mdns_client.c" 19 20 static void *g_accel_p = (void *)0xdeadbeaf; 21 22 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 23 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 24 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 25 spdk_nvme_remove_cb remove_cb), NULL); 26 27 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 28 enum spdk_nvme_transport_type trtype)); 29 30 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 31 NULL); 32 33 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 34 35 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 36 struct spdk_nvme_transport_id *trid), 0); 37 38 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 39 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 40 41 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 42 DEFINE_STUB(spdk_nvme_ctrlr_get_max_sges, uint16_t, (const struct spdk_nvme_ctrlr *ctrlr), 0); 43 DEFINE_STUB(spdk_nvme_ctrlr_get_numa_id, int32_t, (struct spdk_nvme_ctrlr *ctrlr), 44 SPDK_ENV_NUMA_ID_ANY); 45 46 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 47 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 48 49 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 50 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 51 52 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 53 54 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 55 int error_code, const char *msg)); 56 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 57 (struct spdk_jsonrpc_request *request), NULL); 58 DEFINE_STUB_V(spdk_jsonrpc_end_result, 59 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 60 61 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 62 size_t opts_size)); 63 64 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 65 size_t opts_size), 0); 66 DEFINE_STUB(spdk_nvme_dhchap_get_digest_name, const char *, (int id), NULL); 67 DEFINE_STUB(spdk_nvme_dhchap_get_dhgroup_name, const char *, (int id), NULL); 68 69 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 70 71 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 72 enum spdk_bdev_reset_stat_mode mode)); 73 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 74 struct spdk_bdev_io_stat *add)); 75 76 DEFINE_STUB_V(spdk_nvme_qpair_set_abort_dnr, (struct spdk_nvme_qpair *qpair, bool dnr)); 77 DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL); 78 DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k)); 79 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL); 80 DEFINE_STUB(spdk_nvme_scan_attached, int, (const struct spdk_nvme_transport_id *trid), 0); 81 82 int 83 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 84 struct spdk_memory_domain **domains, int array_size) 85 { 86 int i, min_array_size; 87 88 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 89 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 90 for (i = 0; i < min_array_size; i++) { 91 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 92 } 93 } 94 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 95 96 return 0; 97 } 98 99 struct spdk_io_channel * 100 spdk_accel_get_io_channel(void) 101 { 102 return spdk_get_io_channel(g_accel_p); 103 } 104 105 void 106 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 107 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 108 { 109 /* Avoid warning that opts is used uninitialised */ 110 memset(opts, 0, opts_size); 111 } 112 113 #define UT_HOSTNQN "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003" 114 115 static const struct spdk_nvme_ctrlr_opts g_ut_ctrlr_opts = {.hostnqn = UT_HOSTNQN}; 116 117 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 118 (struct spdk_nvme_ctrlr *ctrlr), &g_ut_ctrlr_opts); 119 120 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 121 (const struct spdk_nvme_ctrlr *ctrlr), 0); 122 123 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 124 (struct spdk_nvme_ctrlr *ctrlr), NULL); 125 126 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 127 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 128 129 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 130 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 131 132 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 133 134 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 135 136 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 137 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 138 139 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 140 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 141 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 142 143 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 144 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 145 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 146 147 DEFINE_STUB(spdk_nvme_ctrlr_cmd_iov_raw_with_md, int, ( 148 struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 149 struct spdk_nvme_cmd *cmd, uint32_t len, void *md_buf, 150 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 151 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 152 spdk_nvme_req_next_sge_cb next_sge_fn), 0); 153 154 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 155 size_t *size), 0); 156 157 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 158 159 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 160 161 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 162 163 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 164 165 DEFINE_STUB(spdk_nvme_ns_get_pi_format, enum spdk_nvme_pi_format, (struct spdk_nvme_ns *ns), 166 SPDK_NVME_16B_GUARD_PI); 167 168 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 169 170 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 171 172 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 173 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 174 175 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 176 177 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 178 char *name, size_t *size), 0); 179 180 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 181 (struct spdk_nvme_ns *ns), 0); 182 183 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 184 (const struct spdk_nvme_ctrlr *ctrlr), 0); 185 186 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 187 (struct spdk_nvme_ns *ns), 0); 188 189 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 190 (struct spdk_nvme_ns *ns), 0); 191 192 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 193 (struct spdk_nvme_ns *ns), 0); 194 195 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 196 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 197 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 198 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 199 200 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 201 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 202 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 203 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 204 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 205 206 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 207 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 208 void *payload, uint32_t payload_size, uint64_t slba, 209 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 210 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 211 212 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 213 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 214 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 215 216 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 217 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 218 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 219 220 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 221 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 222 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 223 224 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 225 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 226 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 227 228 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 229 230 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 231 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 232 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 233 234 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 235 (const struct spdk_nvme_status *status), NULL); 236 237 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 238 (const struct spdk_nvme_status *status), NULL); 239 240 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 241 242 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 243 244 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 245 246 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 247 248 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 249 250 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 251 struct iovec *iov, 252 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 253 DEFINE_STUB(spdk_accel_append_crc32c, int, 254 (struct spdk_accel_sequence **seq, struct spdk_io_channel *ch, uint32_t *dst, 255 struct iovec *iovs, uint32_t iovcnt, struct spdk_memory_domain *domain, void *domain_ctx, 256 uint32_t seed, spdk_accel_step_cb cb_fn, void *cb_arg), 0); 257 DEFINE_STUB(spdk_accel_append_copy, int, 258 (struct spdk_accel_sequence **pseq, struct spdk_io_channel *ch, 259 struct iovec *dst_iovs, uint32_t dst_iovcnt, 260 struct spdk_memory_domain *dst_domain, void *dst_domain_ctx, 261 struct iovec *src_iovs, uint32_t src_iovcnt, 262 struct spdk_memory_domain *src_domain, void *src_domain_ctx, 263 spdk_accel_step_cb cb_fn, void *cb_arg), 0); 264 DEFINE_STUB_V(spdk_accel_sequence_finish, 265 (struct spdk_accel_sequence *seq, spdk_accel_completion_cb cb_fn, void *cb_arg)); 266 DEFINE_STUB_V(spdk_accel_sequence_abort, (struct spdk_accel_sequence *seq)); 267 DEFINE_STUB_V(spdk_accel_sequence_reverse, (struct spdk_accel_sequence *seq)); 268 269 struct ut_nvme_req { 270 uint16_t opc; 271 spdk_nvme_cmd_cb cb_fn; 272 void *cb_arg; 273 struct spdk_nvme_cpl cpl; 274 TAILQ_ENTRY(ut_nvme_req) tailq; 275 }; 276 277 struct spdk_nvme_ns { 278 struct spdk_nvme_ctrlr *ctrlr; 279 uint32_t id; 280 bool is_active; 281 struct spdk_uuid *uuid; 282 enum spdk_nvme_ana_state ana_state; 283 enum spdk_nvme_csi csi; 284 }; 285 286 struct spdk_nvme_qpair { 287 struct spdk_nvme_ctrlr *ctrlr; 288 uint8_t failure_reason; 289 bool is_connected; 290 bool in_completion_context; 291 bool delete_after_completion_context; 292 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 293 uint32_t num_outstanding_reqs; 294 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 295 struct spdk_nvme_poll_group *poll_group; 296 void *poll_group_tailq_head; 297 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 298 }; 299 300 struct spdk_nvme_ctrlr { 301 uint32_t num_ns; 302 struct spdk_nvme_ns *ns; 303 struct spdk_nvme_ns_data *nsdata; 304 struct spdk_nvme_qpair adminq; 305 struct spdk_nvme_ctrlr_data cdata; 306 bool attached; 307 bool is_failed; 308 bool fail_reset; 309 bool is_removed; 310 struct spdk_nvme_transport_id trid; 311 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 312 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 313 struct spdk_nvme_ctrlr_opts opts; 314 }; 315 316 struct spdk_nvme_poll_group { 317 void *ctx; 318 struct spdk_nvme_accel_fn_table accel_fn_table; 319 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 320 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 321 }; 322 323 struct spdk_nvme_probe_ctx { 324 struct spdk_nvme_transport_id trid; 325 void *cb_ctx; 326 spdk_nvme_attach_cb attach_cb; 327 struct spdk_nvme_ctrlr *init_ctrlr; 328 }; 329 330 uint32_t 331 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 332 { 333 uint32_t nsid; 334 335 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 336 if (ctrlr->ns[nsid - 1].is_active) { 337 return nsid; 338 } 339 } 340 341 return 0; 342 } 343 344 uint32_t 345 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 346 { 347 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 348 if (ctrlr->ns[nsid - 1].is_active) { 349 return nsid; 350 } 351 } 352 353 return 0; 354 } 355 356 uint32_t 357 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 358 { 359 return qpair->num_outstanding_reqs; 360 } 361 362 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 363 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 364 g_ut_attached_ctrlrs); 365 static int g_ut_attach_ctrlr_status; 366 static size_t g_ut_attach_bdev_count; 367 static int g_ut_register_bdev_status; 368 static struct spdk_bdev *g_ut_registered_bdev; 369 static uint16_t g_ut_cntlid; 370 static struct nvme_path_id g_any_path = {}; 371 372 static void 373 ut_init_trid(struct spdk_nvme_transport_id *trid) 374 { 375 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 376 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 377 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 378 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 379 } 380 381 static void 382 ut_init_trid2(struct spdk_nvme_transport_id *trid) 383 { 384 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 385 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 386 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 387 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 388 } 389 390 static void 391 ut_init_trid3(struct spdk_nvme_transport_id *trid) 392 { 393 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 394 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 395 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 396 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 397 } 398 399 static int 400 cmp_int(int a, int b) 401 { 402 return a - b; 403 } 404 405 int 406 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 407 const struct spdk_nvme_transport_id *trid2) 408 { 409 int cmp; 410 411 /* We assume trtype is TCP for now. */ 412 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 413 414 cmp = cmp_int(trid1->trtype, trid2->trtype); 415 if (cmp) { 416 return cmp; 417 } 418 419 cmp = strcasecmp(trid1->traddr, trid2->traddr); 420 if (cmp) { 421 return cmp; 422 } 423 424 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 425 if (cmp) { 426 return cmp; 427 } 428 429 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 430 if (cmp) { 431 return cmp; 432 } 433 434 cmp = strcmp(trid1->subnqn, trid2->subnqn); 435 if (cmp) { 436 return cmp; 437 } 438 439 return 0; 440 } 441 442 static struct spdk_nvme_ctrlr * 443 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 444 bool ana_reporting, bool multipath) 445 { 446 struct spdk_nvme_ctrlr *ctrlr; 447 uint32_t i; 448 449 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 450 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 451 /* There is a ctrlr whose trid matches. */ 452 return NULL; 453 } 454 } 455 456 ctrlr = calloc(1, sizeof(*ctrlr)); 457 if (ctrlr == NULL) { 458 return NULL; 459 } 460 461 ctrlr->attached = true; 462 ctrlr->adminq.ctrlr = ctrlr; 463 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 464 ctrlr->adminq.is_connected = true; 465 466 if (num_ns != 0) { 467 ctrlr->num_ns = num_ns; 468 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 469 if (ctrlr->ns == NULL) { 470 free(ctrlr); 471 return NULL; 472 } 473 474 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 475 if (ctrlr->nsdata == NULL) { 476 free(ctrlr->ns); 477 free(ctrlr); 478 return NULL; 479 } 480 481 for (i = 0; i < num_ns; i++) { 482 ctrlr->ns[i].id = i + 1; 483 ctrlr->ns[i].ctrlr = ctrlr; 484 ctrlr->ns[i].is_active = true; 485 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 486 ctrlr->nsdata[i].nsze = 1024; 487 ctrlr->nsdata[i].nmic.can_share = multipath; 488 } 489 490 ctrlr->cdata.nn = num_ns; 491 ctrlr->cdata.mnan = num_ns; 492 ctrlr->cdata.nanagrpid = num_ns; 493 } 494 495 ctrlr->cdata.cntlid = ++g_ut_cntlid; 496 ctrlr->cdata.cmic.multi_ctrlr = multipath; 497 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 498 ctrlr->trid = *trid; 499 TAILQ_INIT(&ctrlr->active_io_qpairs); 500 501 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 502 503 return ctrlr; 504 } 505 506 static void 507 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 508 { 509 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 510 511 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 512 free(ctrlr->nsdata); 513 free(ctrlr->ns); 514 free(ctrlr); 515 } 516 517 static int 518 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 519 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 520 { 521 struct ut_nvme_req *req; 522 523 req = calloc(1, sizeof(*req)); 524 if (req == NULL) { 525 return -ENOMEM; 526 } 527 528 req->opc = opc; 529 req->cb_fn = cb_fn; 530 req->cb_arg = cb_arg; 531 532 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 533 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 534 535 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 536 qpair->num_outstanding_reqs++; 537 538 return 0; 539 } 540 541 static struct ut_nvme_req * 542 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 543 { 544 struct ut_nvme_req *req; 545 546 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 547 if (req->cb_arg == cb_arg) { 548 break; 549 } 550 } 551 552 return req; 553 } 554 555 static struct spdk_bdev_io * 556 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 557 struct spdk_io_channel *ch) 558 { 559 struct spdk_bdev_io *bdev_io; 560 561 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 562 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 563 bdev_io->type = type; 564 bdev_io->bdev = &nbdev->disk; 565 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 566 567 return bdev_io; 568 } 569 570 static void 571 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 572 { 573 bdev_io->u.bdev.iovs = &bdev_io->iov; 574 bdev_io->u.bdev.iovcnt = 1; 575 576 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 577 bdev_io->iov.iov_len = 4096; 578 } 579 580 static void 581 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 582 { 583 if (ctrlr->is_failed) { 584 free(ctrlr); 585 return; 586 } 587 588 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 589 if (probe_ctx->cb_ctx) { 590 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 591 } 592 593 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 594 595 if (probe_ctx->attach_cb) { 596 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 597 } 598 } 599 600 int 601 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 602 { 603 struct spdk_nvme_ctrlr *ctrlr, *tmp; 604 605 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 606 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 607 continue; 608 } 609 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 610 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 611 } 612 613 free(probe_ctx); 614 615 return 0; 616 } 617 618 struct spdk_nvme_probe_ctx * 619 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 620 const struct spdk_nvme_ctrlr_opts *opts, 621 spdk_nvme_attach_cb attach_cb) 622 { 623 struct spdk_nvme_probe_ctx *probe_ctx; 624 625 if (trid == NULL) { 626 return NULL; 627 } 628 629 probe_ctx = calloc(1, sizeof(*probe_ctx)); 630 if (probe_ctx == NULL) { 631 return NULL; 632 } 633 634 probe_ctx->trid = *trid; 635 probe_ctx->cb_ctx = (void *)opts; 636 probe_ctx->attach_cb = attach_cb; 637 638 return probe_ctx; 639 } 640 641 int 642 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 643 { 644 if (ctrlr->attached) { 645 ut_detach_ctrlr(ctrlr); 646 } 647 648 return 0; 649 } 650 651 int 652 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 653 { 654 SPDK_CU_ASSERT_FATAL(ctx != NULL); 655 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 656 657 return 0; 658 } 659 660 int 661 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 662 { 663 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 664 } 665 666 void 667 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 668 { 669 memset(opts, 0, opts_size); 670 671 snprintf(opts->hostnqn, sizeof(opts->hostnqn), UT_HOSTNQN); 672 } 673 674 const struct spdk_nvme_ctrlr_data * 675 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 676 { 677 return &ctrlr->cdata; 678 } 679 680 uint32_t 681 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 682 { 683 return ctrlr->num_ns; 684 } 685 686 struct spdk_nvme_ns * 687 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 688 { 689 if (nsid < 1 || nsid > ctrlr->num_ns) { 690 return NULL; 691 } 692 693 return &ctrlr->ns[nsid - 1]; 694 } 695 696 bool 697 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 698 { 699 if (nsid < 1 || nsid > ctrlr->num_ns) { 700 return false; 701 } 702 703 return ctrlr->ns[nsid - 1].is_active; 704 } 705 706 union spdk_nvme_csts_register 707 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 708 { 709 union spdk_nvme_csts_register csts; 710 711 csts.raw = 0; 712 713 return csts; 714 } 715 716 union spdk_nvme_vs_register 717 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 718 { 719 union spdk_nvme_vs_register vs; 720 721 vs.raw = 0; 722 723 return vs; 724 } 725 726 struct spdk_nvme_qpair * 727 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 728 const struct spdk_nvme_io_qpair_opts *user_opts, 729 size_t opts_size) 730 { 731 struct spdk_nvme_qpair *qpair; 732 733 qpair = calloc(1, sizeof(*qpair)); 734 if (qpair == NULL) { 735 return NULL; 736 } 737 738 qpair->ctrlr = ctrlr; 739 TAILQ_INIT(&qpair->outstanding_reqs); 740 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 741 742 return qpair; 743 } 744 745 static void 746 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 747 { 748 struct spdk_nvme_poll_group *group = qpair->poll_group; 749 750 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 751 752 qpair->poll_group_tailq_head = &group->connected_qpairs; 753 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 754 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 755 } 756 757 static void 758 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 759 { 760 struct spdk_nvme_poll_group *group = qpair->poll_group; 761 762 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 763 764 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 765 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 766 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 767 } 768 769 int 770 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 771 struct spdk_nvme_qpair *qpair) 772 { 773 if (qpair->is_connected) { 774 return -EISCONN; 775 } 776 777 qpair->is_connected = true; 778 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 779 780 if (qpair->poll_group) { 781 nvme_poll_group_connect_qpair(qpair); 782 } 783 784 return 0; 785 } 786 787 void 788 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 789 { 790 if (!qpair->is_connected) { 791 return; 792 } 793 794 qpair->is_connected = false; 795 796 if (qpair->poll_group != NULL) { 797 nvme_poll_group_disconnect_qpair(qpair); 798 } 799 } 800 801 int 802 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 803 { 804 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 805 806 if (qpair->in_completion_context) { 807 qpair->delete_after_completion_context = true; 808 return 0; 809 } 810 811 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 812 813 if (qpair->poll_group != NULL) { 814 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 815 } 816 817 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 818 819 CU_ASSERT(qpair->num_outstanding_reqs == 0); 820 821 free(qpair); 822 823 return 0; 824 } 825 826 int 827 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 828 { 829 if (ctrlr->fail_reset) { 830 ctrlr->is_failed = true; 831 return -EIO; 832 } 833 834 ctrlr->adminq.is_connected = true; 835 return 0; 836 } 837 838 void 839 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 840 { 841 } 842 843 int 844 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 845 { 846 if (ctrlr->is_removed) { 847 return -ENXIO; 848 } 849 850 ctrlr->adminq.is_connected = false; 851 ctrlr->is_failed = false; 852 853 return 0; 854 } 855 856 void 857 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 858 { 859 ctrlr->is_failed = true; 860 } 861 862 bool 863 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 864 { 865 return ctrlr->is_failed; 866 } 867 868 spdk_nvme_qp_failure_reason 869 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 870 { 871 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 872 } 873 874 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 875 sizeof(uint32_t)) 876 static void 877 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 878 { 879 struct spdk_nvme_ana_page ana_hdr; 880 char _ana_desc[UT_ANA_DESC_SIZE]; 881 struct spdk_nvme_ana_group_descriptor *ana_desc; 882 struct spdk_nvme_ns *ns; 883 uint32_t i; 884 885 memset(&ana_hdr, 0, sizeof(ana_hdr)); 886 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 887 888 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 889 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 890 891 buf += sizeof(ana_hdr); 892 length -= sizeof(ana_hdr); 893 894 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 895 896 for (i = 0; i < ctrlr->num_ns; i++) { 897 ns = &ctrlr->ns[i]; 898 899 if (!ns->is_active) { 900 continue; 901 } 902 903 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 904 905 ana_desc->ana_group_id = ns->id; 906 ana_desc->num_of_nsid = 1; 907 ana_desc->ana_state = ns->ana_state; 908 ana_desc->nsid[0] = ns->id; 909 910 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 911 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 912 913 buf += UT_ANA_DESC_SIZE; 914 length -= UT_ANA_DESC_SIZE; 915 } 916 } 917 918 int 919 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 920 uint8_t log_page, uint32_t nsid, 921 void *payload, uint32_t payload_size, 922 uint64_t offset, 923 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 924 { 925 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 926 SPDK_CU_ASSERT_FATAL(offset == 0); 927 ut_create_ana_log_page(ctrlr, payload, payload_size); 928 } 929 930 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 931 cb_fn, cb_arg); 932 } 933 934 int 935 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 936 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 937 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 938 { 939 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 940 } 941 942 int 943 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 944 void *cmd_cb_arg, 945 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 946 { 947 struct ut_nvme_req *req = NULL, *abort_req; 948 949 if (qpair == NULL) { 950 qpair = &ctrlr->adminq; 951 } 952 953 abort_req = calloc(1, sizeof(*abort_req)); 954 if (abort_req == NULL) { 955 return -ENOMEM; 956 } 957 958 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 959 if (req->cb_arg == cmd_cb_arg) { 960 break; 961 } 962 } 963 964 if (req == NULL) { 965 free(abort_req); 966 return -ENOENT; 967 } 968 969 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 970 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 971 972 abort_req->opc = SPDK_NVME_OPC_ABORT; 973 abort_req->cb_fn = cb_fn; 974 abort_req->cb_arg = cb_arg; 975 976 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 977 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 978 abort_req->cpl.cdw0 = 0; 979 980 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 981 ctrlr->adminq.num_outstanding_reqs++; 982 983 return 0; 984 } 985 986 int32_t 987 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 988 { 989 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 990 } 991 992 uint32_t 993 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 994 { 995 return ns->id; 996 } 997 998 struct spdk_nvme_ctrlr * 999 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 1000 { 1001 return ns->ctrlr; 1002 } 1003 1004 static inline struct spdk_nvme_ns_data * 1005 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 1006 { 1007 return &ns->ctrlr->nsdata[ns->id - 1]; 1008 } 1009 1010 const struct spdk_nvme_ns_data * 1011 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 1012 { 1013 return _nvme_ns_get_data(ns); 1014 } 1015 1016 uint64_t 1017 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 1018 { 1019 return _nvme_ns_get_data(ns)->nsze; 1020 } 1021 1022 const struct spdk_uuid * 1023 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 1024 { 1025 return ns->uuid; 1026 } 1027 1028 enum spdk_nvme_csi 1029 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 1030 return ns->csi; 1031 } 1032 1033 int 1034 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1035 void *metadata, uint64_t lba, uint32_t lba_count, 1036 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1037 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1038 { 1039 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1040 } 1041 1042 int 1043 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1044 void *buffer, void *metadata, uint64_t lba, 1045 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1046 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1047 { 1048 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1049 } 1050 1051 int 1052 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1053 uint64_t lba, uint32_t lba_count, 1054 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1055 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1056 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1057 uint16_t apptag_mask, uint16_t apptag) 1058 { 1059 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1060 } 1061 1062 int 1063 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1064 uint64_t lba, uint32_t lba_count, 1065 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1066 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1067 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1068 uint16_t apptag_mask, uint16_t apptag) 1069 { 1070 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1071 } 1072 1073 static bool g_ut_readv_ext_called; 1074 int 1075 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1076 uint64_t lba, uint32_t lba_count, 1077 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1078 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1079 spdk_nvme_req_next_sge_cb next_sge_fn, 1080 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1081 { 1082 g_ut_readv_ext_called = true; 1083 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1084 } 1085 1086 static bool g_ut_read_ext_called; 1087 int 1088 spdk_nvme_ns_cmd_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1089 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1090 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1091 { 1092 g_ut_read_ext_called = true; 1093 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1094 } 1095 1096 static bool g_ut_writev_ext_called; 1097 int 1098 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1099 uint64_t lba, uint32_t lba_count, 1100 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1101 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1102 spdk_nvme_req_next_sge_cb next_sge_fn, 1103 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1104 { 1105 g_ut_writev_ext_called = true; 1106 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1107 } 1108 1109 static bool g_ut_write_ext_called; 1110 int 1111 spdk_nvme_ns_cmd_write_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 1112 uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1113 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1114 { 1115 g_ut_write_ext_called = true; 1116 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1117 } 1118 1119 int 1120 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1121 uint64_t lba, uint32_t lba_count, 1122 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1123 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1124 spdk_nvme_req_next_sge_cb next_sge_fn, 1125 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1126 { 1127 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1128 } 1129 1130 int 1131 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1132 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1133 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1134 { 1135 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1136 } 1137 1138 int 1139 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1140 uint64_t lba, uint32_t lba_count, 1141 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1142 uint32_t io_flags) 1143 { 1144 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1145 } 1146 1147 int 1148 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1149 const struct spdk_nvme_scc_source_range *ranges, 1150 uint16_t num_ranges, uint64_t dest_lba, 1151 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1152 { 1153 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1154 } 1155 1156 struct spdk_nvme_poll_group * 1157 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1158 { 1159 struct spdk_nvme_poll_group *group; 1160 1161 group = calloc(1, sizeof(*group)); 1162 if (group == NULL) { 1163 return NULL; 1164 } 1165 1166 group->ctx = ctx; 1167 if (table != NULL) { 1168 group->accel_fn_table = *table; 1169 } 1170 TAILQ_INIT(&group->connected_qpairs); 1171 TAILQ_INIT(&group->disconnected_qpairs); 1172 1173 return group; 1174 } 1175 1176 int 1177 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1178 { 1179 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1180 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1181 return -EBUSY; 1182 } 1183 1184 free(group); 1185 1186 return 0; 1187 } 1188 1189 spdk_nvme_qp_failure_reason 1190 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1191 { 1192 return qpair->failure_reason; 1193 } 1194 1195 bool 1196 spdk_nvme_qpair_is_connected(struct spdk_nvme_qpair *qpair) 1197 { 1198 return qpair->is_connected; 1199 } 1200 1201 int32_t 1202 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1203 uint32_t max_completions) 1204 { 1205 struct ut_nvme_req *req, *tmp; 1206 uint32_t num_completions = 0; 1207 1208 if (!qpair->is_connected) { 1209 return -ENXIO; 1210 } 1211 1212 qpair->in_completion_context = true; 1213 1214 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1215 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1216 qpair->num_outstanding_reqs--; 1217 1218 req->cb_fn(req->cb_arg, &req->cpl); 1219 1220 free(req); 1221 num_completions++; 1222 } 1223 1224 qpair->in_completion_context = false; 1225 if (qpair->delete_after_completion_context) { 1226 spdk_nvme_ctrlr_free_io_qpair(qpair); 1227 } 1228 1229 return num_completions; 1230 } 1231 1232 int64_t 1233 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1234 uint32_t completions_per_qpair, 1235 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1236 { 1237 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1238 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1239 1240 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1241 1242 if (disconnected_qpair_cb == NULL) { 1243 return -EINVAL; 1244 } 1245 1246 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1247 disconnected_qpair_cb(qpair, group->ctx); 1248 } 1249 1250 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1251 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1252 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1253 /* Bump the number of completions so this counts as "busy" */ 1254 num_completions++; 1255 continue; 1256 } 1257 1258 local_completions = spdk_nvme_qpair_process_completions(qpair, 1259 completions_per_qpair); 1260 if (local_completions < 0 && error_reason == 0) { 1261 error_reason = local_completions; 1262 } else { 1263 num_completions += local_completions; 1264 assert(num_completions >= 0); 1265 } 1266 } 1267 1268 return error_reason ? error_reason : num_completions; 1269 } 1270 1271 int 1272 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1273 struct spdk_nvme_qpair *qpair) 1274 { 1275 CU_ASSERT(!qpair->is_connected); 1276 1277 qpair->poll_group = group; 1278 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1279 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1280 1281 return 0; 1282 } 1283 1284 int 1285 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1286 struct spdk_nvme_qpair *qpair) 1287 { 1288 CU_ASSERT(!qpair->is_connected); 1289 1290 if (qpair->poll_group == NULL) { 1291 return -ENOENT; 1292 } 1293 1294 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1295 1296 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1297 1298 qpair->poll_group = NULL; 1299 qpair->poll_group_tailq_head = NULL; 1300 1301 return 0; 1302 } 1303 1304 int 1305 spdk_bdev_register(struct spdk_bdev *bdev) 1306 { 1307 g_ut_registered_bdev = bdev; 1308 1309 return g_ut_register_bdev_status; 1310 } 1311 1312 void 1313 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1314 { 1315 int rc; 1316 1317 rc = bdev->fn_table->destruct(bdev->ctxt); 1318 1319 if (bdev == g_ut_registered_bdev) { 1320 g_ut_registered_bdev = NULL; 1321 } 1322 1323 if (rc <= 0 && cb_fn != NULL) { 1324 cb_fn(cb_arg, rc); 1325 } 1326 } 1327 1328 int 1329 spdk_bdev_open_ext(const char *bdev_name, bool write, 1330 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1331 struct spdk_bdev_desc **desc) 1332 { 1333 if (g_ut_registered_bdev == NULL || 1334 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1335 return -ENODEV; 1336 } 1337 1338 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1339 1340 return 0; 1341 } 1342 1343 struct spdk_bdev * 1344 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1345 { 1346 return (struct spdk_bdev *)desc; 1347 } 1348 1349 int 1350 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1351 { 1352 bdev->blockcnt = size; 1353 1354 return 0; 1355 } 1356 1357 struct spdk_io_channel * 1358 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1359 { 1360 return (struct spdk_io_channel *)bdev_io->internal.ch; 1361 } 1362 1363 struct spdk_thread * 1364 spdk_bdev_io_get_thread(struct spdk_bdev_io *bdev_io) 1365 { 1366 return spdk_io_channel_get_thread(spdk_bdev_io_get_io_channel(bdev_io)); 1367 } 1368 1369 void 1370 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1371 { 1372 bdev_io->internal.status = status; 1373 bdev_io->internal.f.in_submit_request = false; 1374 } 1375 1376 void 1377 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1378 { 1379 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1380 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1381 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1382 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1383 } else { 1384 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1385 } 1386 1387 bdev_io->internal.error.nvme.cdw0 = cdw0; 1388 bdev_io->internal.error.nvme.sct = sct; 1389 bdev_io->internal.error.nvme.sc = sc; 1390 1391 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1392 } 1393 1394 void 1395 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1396 { 1397 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1398 1399 ut_bdev_io_set_buf(bdev_io); 1400 1401 cb(ch, bdev_io, true); 1402 } 1403 1404 static void 1405 test_create_ctrlr(void) 1406 { 1407 struct spdk_nvme_transport_id trid = {}; 1408 struct spdk_nvme_ctrlr ctrlr = {}; 1409 int rc; 1410 1411 ut_init_trid(&trid); 1412 1413 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1414 CU_ASSERT(rc == 0); 1415 1416 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1417 1418 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1419 CU_ASSERT(rc == 0); 1420 1421 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1422 1423 poll_threads(); 1424 spdk_delay_us(1000); 1425 poll_threads(); 1426 1427 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1428 } 1429 1430 static void 1431 ut_check_hotplug_on_reset(void *cb_arg, int rc) 1432 { 1433 bool *detect_remove = cb_arg; 1434 1435 CU_ASSERT(rc != 0); 1436 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1437 1438 *detect_remove = true; 1439 } 1440 1441 static void 1442 test_reset_ctrlr(void) 1443 { 1444 struct spdk_nvme_transport_id trid = {}; 1445 struct spdk_nvme_ctrlr ctrlr = {}; 1446 struct nvme_ctrlr *nvme_ctrlr = NULL; 1447 struct nvme_path_id *curr_trid; 1448 struct spdk_io_channel *ch1, *ch2; 1449 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1450 bool detect_remove; 1451 int rc; 1452 1453 ut_init_trid(&trid); 1454 TAILQ_INIT(&ctrlr.active_io_qpairs); 1455 1456 set_thread(0); 1457 1458 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1459 CU_ASSERT(rc == 0); 1460 1461 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1462 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1463 1464 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1465 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1466 1467 ch1 = spdk_get_io_channel(nvme_ctrlr); 1468 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1469 1470 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1471 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1472 1473 set_thread(1); 1474 1475 ch2 = spdk_get_io_channel(nvme_ctrlr); 1476 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1477 1478 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1479 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1480 1481 /* Reset starts from thread 1. */ 1482 set_thread(1); 1483 1484 /* Case 1: ctrlr is already being destructed. */ 1485 nvme_ctrlr->destruct = true; 1486 1487 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1488 CU_ASSERT(rc == -ENXIO); 1489 1490 /* Case 2: reset is in progress. */ 1491 nvme_ctrlr->destruct = false; 1492 nvme_ctrlr->resetting = true; 1493 1494 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1495 CU_ASSERT(rc == -EBUSY); 1496 1497 /* Case 3: reset completes successfully. */ 1498 nvme_ctrlr->resetting = false; 1499 curr_trid->last_failed_tsc = spdk_get_ticks(); 1500 ctrlr.is_failed = true; 1501 1502 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1503 CU_ASSERT(rc == 0); 1504 CU_ASSERT(nvme_ctrlr->resetting == true); 1505 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1506 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1507 1508 poll_thread_times(0, 3); 1509 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1510 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1511 1512 poll_thread_times(0, 1); 1513 poll_thread_times(1, 1); 1514 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1515 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1516 CU_ASSERT(ctrlr.is_failed == true); 1517 1518 poll_thread_times(1, 1); 1519 poll_thread_times(0, 1); 1520 CU_ASSERT(ctrlr.is_failed == false); 1521 CU_ASSERT(ctrlr.adminq.is_connected == false); 1522 1523 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1524 poll_thread_times(0, 2); 1525 CU_ASSERT(ctrlr.adminq.is_connected == true); 1526 1527 poll_thread_times(0, 1); 1528 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1529 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1530 1531 poll_thread_times(1, 1); 1532 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1533 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1534 CU_ASSERT(nvme_ctrlr->resetting == true); 1535 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1536 1537 poll_thread_times(0, 2); 1538 CU_ASSERT(nvme_ctrlr->resetting == true); 1539 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1540 poll_thread_times(1, 1); 1541 CU_ASSERT(nvme_ctrlr->resetting == true); 1542 poll_thread_times(0, 1); 1543 CU_ASSERT(nvme_ctrlr->resetting == false); 1544 1545 /* Case 4: ctrlr is already removed. */ 1546 ctrlr.is_removed = true; 1547 1548 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1549 CU_ASSERT(rc == 0); 1550 1551 detect_remove = false; 1552 nvme_ctrlr->ctrlr_op_cb_fn = ut_check_hotplug_on_reset; 1553 nvme_ctrlr->ctrlr_op_cb_arg = &detect_remove; 1554 1555 poll_threads(); 1556 1557 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_fn == NULL); 1558 CU_ASSERT(nvme_ctrlr->ctrlr_op_cb_arg == NULL); 1559 CU_ASSERT(detect_remove == true); 1560 1561 ctrlr.is_removed = false; 1562 1563 spdk_put_io_channel(ch2); 1564 1565 set_thread(0); 1566 1567 spdk_put_io_channel(ch1); 1568 1569 poll_threads(); 1570 1571 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1572 CU_ASSERT(rc == 0); 1573 1574 poll_threads(); 1575 spdk_delay_us(1000); 1576 poll_threads(); 1577 1578 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1579 } 1580 1581 static void 1582 test_race_between_reset_and_destruct_ctrlr(void) 1583 { 1584 struct spdk_nvme_transport_id trid = {}; 1585 struct spdk_nvme_ctrlr ctrlr = {}; 1586 struct nvme_ctrlr *nvme_ctrlr; 1587 struct spdk_io_channel *ch1, *ch2; 1588 int rc; 1589 1590 ut_init_trid(&trid); 1591 TAILQ_INIT(&ctrlr.active_io_qpairs); 1592 1593 set_thread(0); 1594 1595 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1596 CU_ASSERT(rc == 0); 1597 1598 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1599 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1600 1601 ch1 = spdk_get_io_channel(nvme_ctrlr); 1602 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1603 1604 set_thread(1); 1605 1606 ch2 = spdk_get_io_channel(nvme_ctrlr); 1607 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1608 1609 /* Reset starts from thread 1. */ 1610 set_thread(1); 1611 1612 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1613 CU_ASSERT(rc == 0); 1614 CU_ASSERT(nvme_ctrlr->resetting == true); 1615 1616 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1617 set_thread(0); 1618 1619 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1620 CU_ASSERT(rc == 0); 1621 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1622 CU_ASSERT(nvme_ctrlr->destruct == true); 1623 CU_ASSERT(nvme_ctrlr->resetting == true); 1624 1625 poll_threads(); 1626 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1627 poll_threads(); 1628 1629 /* Reset completed but ctrlr is not still destructed yet. */ 1630 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1631 CU_ASSERT(nvme_ctrlr->destruct == true); 1632 CU_ASSERT(nvme_ctrlr->resetting == false); 1633 1634 /* New reset request is rejected. */ 1635 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1636 CU_ASSERT(rc == -ENXIO); 1637 1638 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1639 * However there are two channels and destruct is not completed yet. 1640 */ 1641 poll_threads(); 1642 1643 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1644 1645 set_thread(0); 1646 1647 spdk_put_io_channel(ch1); 1648 1649 set_thread(1); 1650 1651 spdk_put_io_channel(ch2); 1652 1653 poll_threads(); 1654 spdk_delay_us(1000); 1655 poll_threads(); 1656 1657 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1658 } 1659 1660 static void 1661 test_failover_ctrlr(void) 1662 { 1663 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1664 struct spdk_nvme_ctrlr ctrlr = {}; 1665 struct nvme_ctrlr *nvme_ctrlr = NULL; 1666 struct nvme_path_id *curr_trid, *next_trid; 1667 struct spdk_io_channel *ch1, *ch2; 1668 int rc; 1669 1670 ut_init_trid(&trid1); 1671 ut_init_trid2(&trid2); 1672 TAILQ_INIT(&ctrlr.active_io_qpairs); 1673 1674 set_thread(0); 1675 1676 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1677 CU_ASSERT(rc == 0); 1678 1679 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1680 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1681 1682 ch1 = spdk_get_io_channel(nvme_ctrlr); 1683 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1684 1685 set_thread(1); 1686 1687 ch2 = spdk_get_io_channel(nvme_ctrlr); 1688 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1689 1690 /* First, test one trid case. */ 1691 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1692 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1693 1694 /* Failover starts from thread 1. */ 1695 set_thread(1); 1696 1697 /* Case 1: ctrlr is already being destructed. */ 1698 nvme_ctrlr->destruct = true; 1699 1700 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1701 CU_ASSERT(rc == -ENXIO); 1702 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1703 1704 /* Case 2: reset is in progress. */ 1705 nvme_ctrlr->destruct = false; 1706 nvme_ctrlr->resetting = true; 1707 1708 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1709 CU_ASSERT(rc == -EINPROGRESS); 1710 1711 /* Case 3: reset completes successfully. */ 1712 nvme_ctrlr->resetting = false; 1713 1714 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1715 CU_ASSERT(rc == 0); 1716 1717 CU_ASSERT(nvme_ctrlr->resetting == true); 1718 CU_ASSERT(curr_trid->last_failed_tsc != 0); 1719 1720 poll_threads(); 1721 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1722 poll_threads(); 1723 1724 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1725 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1726 1727 CU_ASSERT(nvme_ctrlr->resetting == false); 1728 CU_ASSERT(curr_trid->last_failed_tsc == 0); 1729 1730 set_thread(0); 1731 1732 /* Second, test two trids case. */ 1733 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1734 CU_ASSERT(rc == 0); 1735 1736 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1737 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1738 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1739 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1740 1741 /* Failover starts from thread 1. */ 1742 set_thread(1); 1743 1744 /* Case 4: reset is in progress. */ 1745 nvme_ctrlr->resetting = true; 1746 1747 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1748 CU_ASSERT(rc == -EINPROGRESS); 1749 1750 /* Case 5: failover completes successfully. */ 1751 nvme_ctrlr->resetting = false; 1752 1753 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 1754 CU_ASSERT(rc == 0); 1755 1756 CU_ASSERT(nvme_ctrlr->resetting == true); 1757 1758 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1759 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1760 CU_ASSERT(next_trid != curr_trid); 1761 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1762 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1763 1764 poll_threads(); 1765 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1766 poll_threads(); 1767 1768 CU_ASSERT(nvme_ctrlr->resetting == false); 1769 1770 spdk_put_io_channel(ch2); 1771 1772 set_thread(0); 1773 1774 spdk_put_io_channel(ch1); 1775 1776 poll_threads(); 1777 1778 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1779 CU_ASSERT(rc == 0); 1780 1781 poll_threads(); 1782 spdk_delay_us(1000); 1783 poll_threads(); 1784 1785 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1786 } 1787 1788 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1789 * 1790 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1791 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1792 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1793 * have been active, i.e., the head of the list until the failover completed. 1794 * However trid3 was inserted to the head of the list by mistake. 1795 * 1796 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1797 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1798 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1799 * may be executed repeatedly before failover is executed. Hence this bug is real. 1800 * 1801 * The following test verifies the fix. 1802 */ 1803 static void 1804 test_race_between_failover_and_add_secondary_trid(void) 1805 { 1806 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1807 struct spdk_nvme_ctrlr ctrlr = {}; 1808 struct nvme_ctrlr *nvme_ctrlr = NULL; 1809 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1810 struct spdk_io_channel *ch1, *ch2; 1811 int rc; 1812 1813 ut_init_trid(&trid1); 1814 ut_init_trid2(&trid2); 1815 ut_init_trid3(&trid3); 1816 TAILQ_INIT(&ctrlr.active_io_qpairs); 1817 1818 set_thread(0); 1819 1820 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1821 CU_ASSERT(rc == 0); 1822 1823 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1824 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1825 1826 ch1 = spdk_get_io_channel(nvme_ctrlr); 1827 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1828 1829 set_thread(1); 1830 1831 ch2 = spdk_get_io_channel(nvme_ctrlr); 1832 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1833 1834 set_thread(0); 1835 1836 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1837 CU_ASSERT(rc == 0); 1838 1839 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1840 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1841 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1842 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1843 path_id2 = TAILQ_NEXT(path_id1, link); 1844 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1845 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1846 1847 ctrlr.fail_reset = true; 1848 1849 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1850 CU_ASSERT(rc == 0); 1851 1852 poll_threads(); 1853 1854 CU_ASSERT(path_id1->last_failed_tsc != 0); 1855 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1856 1857 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 1858 CU_ASSERT(rc == 0); 1859 1860 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1861 CU_ASSERT(rc == 0); 1862 1863 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1864 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1865 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1866 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1867 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1868 path_id3 = TAILQ_NEXT(path_id2, link); 1869 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1870 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1871 1872 poll_threads(); 1873 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1874 poll_threads(); 1875 1876 spdk_put_io_channel(ch1); 1877 1878 set_thread(1); 1879 1880 spdk_put_io_channel(ch2); 1881 1882 poll_threads(); 1883 1884 set_thread(0); 1885 1886 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 1887 CU_ASSERT(rc == 0); 1888 1889 poll_threads(); 1890 spdk_delay_us(1000); 1891 poll_threads(); 1892 1893 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1894 } 1895 1896 static void 1897 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1898 { 1899 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1900 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1901 } 1902 1903 static void 1904 test_pending_reset(void) 1905 { 1906 struct spdk_nvme_transport_id trid = {}; 1907 struct spdk_nvme_ctrlr *ctrlr; 1908 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 1909 struct nvme_ctrlr *nvme_ctrlr = NULL; 1910 const int STRING_SIZE = 32; 1911 const char *attached_names[STRING_SIZE]; 1912 struct nvme_bdev *bdev; 1913 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1914 struct spdk_io_channel *ch1, *ch2; 1915 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1916 struct nvme_io_path *io_path1, *io_path2; 1917 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1918 int rc; 1919 1920 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1921 ut_init_trid(&trid); 1922 1923 set_thread(0); 1924 1925 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1926 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1927 1928 g_ut_attach_ctrlr_status = 0; 1929 g_ut_attach_bdev_count = 1; 1930 1931 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1932 attach_ctrlr_done, NULL, &opts, NULL, false); 1933 CU_ASSERT(rc == 0); 1934 1935 spdk_delay_us(1000); 1936 poll_threads(); 1937 1938 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1939 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1940 1941 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1942 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1943 1944 ch1 = spdk_get_io_channel(bdev); 1945 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1946 1947 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1948 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1949 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1950 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1951 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1952 1953 set_thread(1); 1954 1955 ch2 = spdk_get_io_channel(bdev); 1956 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1957 1958 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1959 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1960 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1961 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1962 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1963 1964 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1965 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1966 1967 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1968 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1969 1970 /* The first reset request is submitted on thread 1, and the second reset request 1971 * is submitted on thread 0 while processing the first request. 1972 */ 1973 bdev_nvme_submit_request(ch2, first_bdev_io); 1974 1975 poll_thread_times(0, 1); 1976 poll_thread_times(1, 2); 1977 1978 CU_ASSERT(nvme_ctrlr->resetting == true); 1979 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1980 1981 set_thread(0); 1982 1983 bdev_nvme_submit_request(ch1, second_bdev_io); 1984 1985 poll_thread_times(0, 1); 1986 poll_thread_times(1, 1); 1987 poll_thread_times(0, 2); 1988 poll_thread_times(1, 1); 1989 poll_thread_times(0, 1); 1990 1991 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 1992 1993 poll_threads(); 1994 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1995 poll_threads(); 1996 1997 CU_ASSERT(nvme_ctrlr->resetting == false); 1998 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1999 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2000 2001 /* The first reset request is submitted on thread 1, and the second reset request 2002 * is submitted on thread 0 while processing the first request. 2003 * 2004 * The difference from the above scenario is that the controller is removed while 2005 * processing the first request. Hence both reset requests should fail. 2006 */ 2007 set_thread(1); 2008 2009 bdev_nvme_submit_request(ch2, first_bdev_io); 2010 2011 poll_thread_times(0, 1); 2012 poll_thread_times(1, 2); 2013 2014 CU_ASSERT(nvme_ctrlr->resetting == true); 2015 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 2016 2017 set_thread(0); 2018 2019 bdev_nvme_submit_request(ch1, second_bdev_io); 2020 2021 poll_thread_times(0, 1); 2022 poll_thread_times(1, 1); 2023 poll_thread_times(0, 2); 2024 poll_thread_times(1, 1); 2025 poll_thread_times(0, 1); 2026 2027 CU_ASSERT(spdk_bdev_io_from_ctx(TAILQ_FIRST(&ctrlr_ch1->pending_resets)) == second_bdev_io); 2028 2029 ctrlr->fail_reset = true; 2030 2031 poll_threads(); 2032 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2033 poll_threads(); 2034 2035 CU_ASSERT(nvme_ctrlr->resetting == false); 2036 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2037 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2038 2039 spdk_put_io_channel(ch1); 2040 2041 set_thread(1); 2042 2043 spdk_put_io_channel(ch2); 2044 2045 poll_threads(); 2046 2047 set_thread(0); 2048 2049 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2050 CU_ASSERT(rc == 0); 2051 2052 poll_threads(); 2053 spdk_delay_us(1000); 2054 poll_threads(); 2055 2056 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2057 2058 free(first_bdev_io); 2059 free(second_bdev_io); 2060 } 2061 2062 static void 2063 test_attach_ctrlr(void) 2064 { 2065 struct spdk_nvme_transport_id trid = {}; 2066 struct spdk_nvme_ctrlr *ctrlr; 2067 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2068 struct nvme_ctrlr *nvme_ctrlr; 2069 const int STRING_SIZE = 32; 2070 const char *attached_names[STRING_SIZE]; 2071 struct nvme_bdev *nbdev; 2072 int rc; 2073 2074 set_thread(0); 2075 2076 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2077 ut_init_trid(&trid); 2078 2079 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 2080 * by probe polling. 2081 */ 2082 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2083 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2084 2085 ctrlr->is_failed = true; 2086 g_ut_attach_ctrlr_status = -EIO; 2087 g_ut_attach_bdev_count = 0; 2088 2089 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2090 attach_ctrlr_done, NULL, &opts, NULL, false); 2091 CU_ASSERT(rc == 0); 2092 2093 spdk_delay_us(1000); 2094 poll_threads(); 2095 2096 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2097 2098 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2099 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2100 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2101 2102 g_ut_attach_ctrlr_status = 0; 2103 2104 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2105 attach_ctrlr_done, NULL, &opts, NULL, false); 2106 CU_ASSERT(rc == 0); 2107 2108 spdk_delay_us(1000); 2109 poll_threads(); 2110 2111 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2112 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2113 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2114 2115 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2116 CU_ASSERT(rc == 0); 2117 2118 poll_threads(); 2119 spdk_delay_us(1000); 2120 poll_threads(); 2121 2122 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2123 2124 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2125 * one nvme_bdev is created. 2126 */ 2127 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2128 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2129 2130 g_ut_attach_bdev_count = 1; 2131 2132 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2133 attach_ctrlr_done, NULL, &opts, NULL, false); 2134 CU_ASSERT(rc == 0); 2135 2136 spdk_delay_us(1000); 2137 poll_threads(); 2138 2139 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2140 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2141 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2142 2143 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2144 attached_names[0] = NULL; 2145 2146 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2147 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2148 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2149 2150 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2151 CU_ASSERT(rc == 0); 2152 2153 poll_threads(); 2154 spdk_delay_us(1000); 2155 poll_threads(); 2156 2157 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2158 2159 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2160 * created because creating one nvme_bdev failed. 2161 */ 2162 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2163 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2164 2165 g_ut_register_bdev_status = -EINVAL; 2166 g_ut_attach_bdev_count = 0; 2167 2168 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2169 attach_ctrlr_done, NULL, &opts, NULL, false); 2170 CU_ASSERT(rc == 0); 2171 2172 spdk_delay_us(1000); 2173 poll_threads(); 2174 2175 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2176 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2177 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2178 2179 CU_ASSERT(attached_names[0] == NULL); 2180 2181 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2182 CU_ASSERT(rc == 0); 2183 2184 poll_threads(); 2185 spdk_delay_us(1000); 2186 poll_threads(); 2187 2188 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2189 2190 g_ut_register_bdev_status = 0; 2191 } 2192 2193 static void 2194 test_aer_cb(void) 2195 { 2196 struct spdk_nvme_transport_id trid = {}; 2197 struct spdk_nvme_ctrlr *ctrlr; 2198 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2199 struct nvme_ctrlr *nvme_ctrlr; 2200 struct nvme_bdev *bdev; 2201 const int STRING_SIZE = 32; 2202 const char *attached_names[STRING_SIZE]; 2203 union spdk_nvme_async_event_completion event = {}; 2204 struct spdk_nvme_cpl cpl = {}; 2205 int rc; 2206 2207 set_thread(0); 2208 2209 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2210 ut_init_trid(&trid); 2211 2212 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2213 * namespaces are populated. 2214 */ 2215 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2216 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2217 2218 ctrlr->ns[0].is_active = false; 2219 2220 g_ut_attach_ctrlr_status = 0; 2221 g_ut_attach_bdev_count = 3; 2222 2223 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2224 attach_ctrlr_done, NULL, &opts, NULL, false); 2225 CU_ASSERT(rc == 0); 2226 2227 spdk_delay_us(1000); 2228 poll_threads(); 2229 2230 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2231 poll_threads(); 2232 2233 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2234 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2235 2236 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2237 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2238 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2239 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2240 2241 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2242 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2243 CU_ASSERT(bdev->disk.blockcnt == 1024); 2244 2245 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2246 * change the size of the 4th namespace. 2247 */ 2248 ctrlr->ns[0].is_active = true; 2249 ctrlr->ns[2].is_active = false; 2250 ctrlr->nsdata[3].nsze = 2048; 2251 2252 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2253 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2254 cpl.cdw0 = event.raw; 2255 2256 aer_cb(nvme_ctrlr, &cpl); 2257 2258 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2259 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2260 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2261 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2262 CU_ASSERT(bdev->disk.blockcnt == 2048); 2263 2264 /* Change ANA state of active namespaces. */ 2265 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2266 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2267 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2268 2269 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2270 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2271 cpl.cdw0 = event.raw; 2272 2273 aer_cb(nvme_ctrlr, &cpl); 2274 2275 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2276 poll_threads(); 2277 2278 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2279 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2280 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2281 2282 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2283 CU_ASSERT(rc == 0); 2284 2285 poll_threads(); 2286 spdk_delay_us(1000); 2287 poll_threads(); 2288 2289 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2290 } 2291 2292 static void 2293 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2294 enum spdk_bdev_io_type io_type) 2295 { 2296 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2297 struct nvme_io_path *io_path; 2298 struct spdk_nvme_qpair *qpair; 2299 2300 io_path = bdev_nvme_find_io_path(nbdev_ch); 2301 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2302 qpair = io_path->qpair->qpair; 2303 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2304 2305 bdev_io->type = io_type; 2306 bdev_io->internal.f.in_submit_request = true; 2307 2308 bdev_nvme_submit_request(ch, bdev_io); 2309 2310 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 2311 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2312 2313 poll_threads(); 2314 2315 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 2316 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2317 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2318 } 2319 2320 static void 2321 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2322 enum spdk_bdev_io_type io_type) 2323 { 2324 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2325 struct nvme_io_path *io_path; 2326 struct spdk_nvme_qpair *qpair; 2327 2328 io_path = bdev_nvme_find_io_path(nbdev_ch); 2329 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2330 qpair = io_path->qpair->qpair; 2331 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2332 2333 bdev_io->type = io_type; 2334 bdev_io->internal.f.in_submit_request = true; 2335 2336 bdev_nvme_submit_request(ch, bdev_io); 2337 2338 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 2339 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2340 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2341 } 2342 2343 static void 2344 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2345 { 2346 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2347 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2348 struct ut_nvme_req *req; 2349 struct nvme_io_path *io_path; 2350 struct spdk_nvme_qpair *qpair; 2351 2352 io_path = bdev_nvme_find_io_path(nbdev_ch); 2353 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2354 qpair = io_path->qpair->qpair; 2355 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2356 2357 /* Only compare and write now. */ 2358 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2359 bdev_io->internal.f.in_submit_request = true; 2360 2361 bdev_nvme_submit_request(ch, bdev_io); 2362 2363 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 2364 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2365 CU_ASSERT(bio->first_fused_submitted == true); 2366 2367 /* First outstanding request is compare operation. */ 2368 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2369 SPDK_CU_ASSERT_FATAL(req != NULL); 2370 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2371 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2372 2373 poll_threads(); 2374 2375 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 2376 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2377 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2378 } 2379 2380 static void 2381 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2382 struct spdk_nvme_ctrlr *ctrlr) 2383 { 2384 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2385 bdev_io->internal.f.in_submit_request = true; 2386 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2387 2388 bdev_nvme_submit_request(ch, bdev_io); 2389 2390 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 2391 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2392 2393 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2394 poll_thread_times(1, 1); 2395 2396 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 2397 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2398 2399 poll_thread_times(0, 1); 2400 2401 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 2402 } 2403 2404 static void 2405 test_submit_nvme_cmd(void) 2406 { 2407 struct spdk_nvme_transport_id trid = {}; 2408 struct spdk_nvme_ctrlr *ctrlr; 2409 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2410 struct nvme_ctrlr *nvme_ctrlr; 2411 const int STRING_SIZE = 32; 2412 const char *attached_names[STRING_SIZE]; 2413 struct nvme_bdev *bdev; 2414 struct spdk_bdev_io *bdev_io; 2415 struct spdk_io_channel *ch; 2416 int rc; 2417 2418 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2419 ut_init_trid(&trid); 2420 2421 set_thread(1); 2422 2423 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2424 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2425 2426 g_ut_attach_ctrlr_status = 0; 2427 g_ut_attach_bdev_count = 1; 2428 2429 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2430 attach_ctrlr_done, NULL, &opts, NULL, false); 2431 CU_ASSERT(rc == 0); 2432 2433 spdk_delay_us(1000); 2434 poll_threads(); 2435 2436 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2437 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2438 2439 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2440 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2441 2442 set_thread(0); 2443 2444 ch = spdk_get_io_channel(bdev); 2445 SPDK_CU_ASSERT_FATAL(ch != NULL); 2446 2447 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2448 2449 bdev_io->u.bdev.iovs = NULL; 2450 2451 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2452 2453 ut_bdev_io_set_buf(bdev_io); 2454 2455 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2456 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2457 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2458 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2459 2460 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2461 2462 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2463 2464 /* Verify that ext NVME API is called when data is described by memory domain */ 2465 g_ut_read_ext_called = false; 2466 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2467 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2468 CU_ASSERT(g_ut_read_ext_called == true); 2469 g_ut_read_ext_called = false; 2470 bdev_io->u.bdev.memory_domain = NULL; 2471 2472 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2473 2474 free(bdev_io); 2475 2476 spdk_put_io_channel(ch); 2477 2478 poll_threads(); 2479 2480 set_thread(1); 2481 2482 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2483 CU_ASSERT(rc == 0); 2484 2485 poll_threads(); 2486 spdk_delay_us(1000); 2487 poll_threads(); 2488 2489 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2490 } 2491 2492 static void 2493 test_add_remove_trid(void) 2494 { 2495 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2496 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2497 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2498 struct nvme_ctrlr *nvme_ctrlr = NULL; 2499 const int STRING_SIZE = 32; 2500 const char *attached_names[STRING_SIZE]; 2501 struct nvme_path_id *ctrid; 2502 int rc; 2503 2504 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2505 ut_init_trid(&path1.trid); 2506 ut_init_trid2(&path2.trid); 2507 ut_init_trid3(&path3.trid); 2508 2509 set_thread(0); 2510 2511 g_ut_attach_ctrlr_status = 0; 2512 g_ut_attach_bdev_count = 0; 2513 2514 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2515 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2516 2517 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2518 attach_ctrlr_done, NULL, &opts, NULL, false); 2519 CU_ASSERT(rc == 0); 2520 2521 spdk_delay_us(1000); 2522 poll_threads(); 2523 2524 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2525 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2526 2527 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2528 2529 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2530 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2531 2532 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2533 attach_ctrlr_done, NULL, &opts, NULL, false); 2534 CU_ASSERT(rc == 0); 2535 2536 spdk_delay_us(1000); 2537 poll_threads(); 2538 2539 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2540 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2541 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2542 break; 2543 } 2544 } 2545 CU_ASSERT(ctrid != NULL); 2546 2547 /* trid3 is not in the registered list. */ 2548 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2549 CU_ASSERT(rc == -ENXIO); 2550 2551 /* trid2 is not used, and simply removed. */ 2552 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2553 CU_ASSERT(rc == 0); 2554 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2555 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2556 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2557 } 2558 2559 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2560 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2561 2562 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2563 attach_ctrlr_done, NULL, &opts, NULL, false); 2564 CU_ASSERT(rc == 0); 2565 2566 spdk_delay_us(1000); 2567 poll_threads(); 2568 2569 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2570 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2571 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2572 break; 2573 } 2574 } 2575 CU_ASSERT(ctrid != NULL); 2576 2577 /* Mark path3 as failed by setting its last_failed_tsc to non-zero forcefully. 2578 * If we add path2 again, path2 should be inserted between path1 and path3. 2579 * Then, we remove path2. It is not used, and simply removed. 2580 */ 2581 ctrid->last_failed_tsc = spdk_get_ticks() + 1; 2582 2583 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2584 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2585 2586 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2587 attach_ctrlr_done, NULL, &opts, NULL, false); 2588 CU_ASSERT(rc == 0); 2589 2590 spdk_delay_us(1000); 2591 poll_threads(); 2592 2593 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2594 2595 ctrid = TAILQ_NEXT(nvme_ctrlr->active_path_id, link); 2596 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2597 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0); 2598 2599 ctrid = TAILQ_NEXT(ctrid, link); 2600 SPDK_CU_ASSERT_FATAL(ctrid != NULL); 2601 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0); 2602 2603 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 2604 CU_ASSERT(rc == 0); 2605 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2606 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2607 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2608 } 2609 2610 /* path1 is currently used and path3 is an alternative path. 2611 * If we remove path1, path is changed to path3. 2612 */ 2613 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 2614 CU_ASSERT(rc == 0); 2615 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2616 CU_ASSERT(nvme_ctrlr->resetting == true); 2617 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2618 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2619 } 2620 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2621 2622 poll_threads(); 2623 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2624 poll_threads(); 2625 2626 CU_ASSERT(nvme_ctrlr->resetting == false); 2627 2628 /* path3 is the current and only path. If we remove path3, the corresponding 2629 * nvme_ctrlr is removed. 2630 */ 2631 rc = bdev_nvme_delete("nvme0", &path3, NULL, NULL); 2632 CU_ASSERT(rc == 0); 2633 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2634 2635 poll_threads(); 2636 spdk_delay_us(1000); 2637 poll_threads(); 2638 2639 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2640 2641 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2642 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2643 2644 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2645 attach_ctrlr_done, NULL, &opts, NULL, false); 2646 CU_ASSERT(rc == 0); 2647 2648 spdk_delay_us(1000); 2649 poll_threads(); 2650 2651 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2652 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2653 2654 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2655 2656 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2657 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2658 2659 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2660 attach_ctrlr_done, NULL, &opts, NULL, false); 2661 CU_ASSERT(rc == 0); 2662 2663 spdk_delay_us(1000); 2664 poll_threads(); 2665 2666 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2667 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2668 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2669 break; 2670 } 2671 } 2672 CU_ASSERT(ctrid != NULL); 2673 2674 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2675 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2676 CU_ASSERT(rc == 0); 2677 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2678 2679 poll_threads(); 2680 spdk_delay_us(1000); 2681 poll_threads(); 2682 2683 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2684 } 2685 2686 static void 2687 test_abort(void) 2688 { 2689 struct spdk_nvme_transport_id trid = {}; 2690 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 2691 struct spdk_nvme_ctrlr *ctrlr; 2692 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 2693 struct nvme_ctrlr *nvme_ctrlr; 2694 const int STRING_SIZE = 32; 2695 const char *attached_names[STRING_SIZE]; 2696 struct nvme_bdev *bdev; 2697 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2698 struct spdk_io_channel *ch1, *ch2; 2699 struct nvme_bdev_channel *nbdev_ch1; 2700 struct nvme_io_path *io_path1; 2701 struct nvme_qpair *nvme_qpair1; 2702 int rc; 2703 2704 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2705 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2706 * are submitted on thread 1. Both should succeed. 2707 */ 2708 2709 ut_init_trid(&trid); 2710 2711 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2712 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2713 2714 g_ut_attach_ctrlr_status = 0; 2715 g_ut_attach_bdev_count = 1; 2716 2717 set_thread(1); 2718 2719 opts.ctrlr_loss_timeout_sec = -1; 2720 opts.reconnect_delay_sec = 1; 2721 2722 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2723 attach_ctrlr_done, NULL, &dopts, &opts, false); 2724 CU_ASSERT(rc == 0); 2725 2726 spdk_delay_us(1000); 2727 poll_threads(); 2728 2729 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2730 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2731 2732 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2733 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2734 2735 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2736 ut_bdev_io_set_buf(write_io); 2737 2738 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2739 ut_bdev_io_set_buf(fuse_io); 2740 2741 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2742 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2743 2744 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2745 2746 set_thread(0); 2747 2748 ch1 = spdk_get_io_channel(bdev); 2749 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2750 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2751 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2752 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2753 nvme_qpair1 = io_path1->qpair; 2754 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2755 2756 set_thread(1); 2757 2758 ch2 = spdk_get_io_channel(bdev); 2759 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2760 2761 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2762 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2763 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2764 2765 /* Aborting the already completed request should fail. */ 2766 write_io->internal.f.in_submit_request = true; 2767 bdev_nvme_submit_request(ch1, write_io); 2768 poll_threads(); 2769 2770 CU_ASSERT(write_io->internal.f.in_submit_request == false); 2771 2772 abort_io->u.abort.bio_to_abort = write_io; 2773 abort_io->internal.f.in_submit_request = true; 2774 2775 bdev_nvme_submit_request(ch1, abort_io); 2776 2777 poll_threads(); 2778 2779 CU_ASSERT(abort_io->internal.f.in_submit_request == false); 2780 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2781 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2782 2783 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2784 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2785 2786 admin_io->internal.f.in_submit_request = true; 2787 bdev_nvme_submit_request(ch1, admin_io); 2788 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2789 poll_threads(); 2790 2791 CU_ASSERT(admin_io->internal.f.in_submit_request == false); 2792 2793 abort_io->u.abort.bio_to_abort = admin_io; 2794 abort_io->internal.f.in_submit_request = true; 2795 2796 bdev_nvme_submit_request(ch2, abort_io); 2797 2798 poll_threads(); 2799 2800 CU_ASSERT(abort_io->internal.f.in_submit_request == false); 2801 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2802 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2803 2804 /* Aborting the write request should succeed. */ 2805 write_io->internal.f.in_submit_request = true; 2806 bdev_nvme_submit_request(ch1, write_io); 2807 2808 CU_ASSERT(write_io->internal.f.in_submit_request == true); 2809 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2810 2811 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2812 abort_io->u.abort.bio_to_abort = write_io; 2813 abort_io->internal.f.in_submit_request = true; 2814 2815 bdev_nvme_submit_request(ch1, abort_io); 2816 2817 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2818 poll_threads(); 2819 2820 CU_ASSERT(abort_io->internal.f.in_submit_request == false); 2821 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2822 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2823 CU_ASSERT(write_io->internal.f.in_submit_request == false); 2824 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2825 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2826 2827 /* Aborting the fuse request should succeed. */ 2828 fuse_io->internal.f.in_submit_request = true; 2829 bdev_nvme_submit_request(ch1, fuse_io); 2830 2831 CU_ASSERT(fuse_io->internal.f.in_submit_request == true); 2832 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2833 2834 abort_io->u.abort.bio_to_abort = fuse_io; 2835 abort_io->internal.f.in_submit_request = true; 2836 2837 bdev_nvme_submit_request(ch1, abort_io); 2838 2839 spdk_delay_us(10000); 2840 poll_threads(); 2841 2842 CU_ASSERT(abort_io->internal.f.in_submit_request == false); 2843 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2844 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2845 CU_ASSERT(fuse_io->internal.f.in_submit_request == false); 2846 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2847 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2848 2849 /* Aborting the admin request should succeed. */ 2850 admin_io->internal.f.in_submit_request = true; 2851 bdev_nvme_submit_request(ch1, admin_io); 2852 2853 CU_ASSERT(admin_io->internal.f.in_submit_request == true); 2854 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2855 2856 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2857 abort_io->u.abort.bio_to_abort = admin_io; 2858 abort_io->internal.f.in_submit_request = true; 2859 2860 bdev_nvme_submit_request(ch2, abort_io); 2861 2862 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2863 poll_threads(); 2864 2865 CU_ASSERT(abort_io->internal.f.in_submit_request == false); 2866 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2867 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2868 CU_ASSERT(admin_io->internal.f.in_submit_request == false); 2869 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2870 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2871 2872 set_thread(0); 2873 2874 /* If qpair is disconnected, it is freed and then reconnected via resetting 2875 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2876 * while resetting the nvme_ctrlr. 2877 */ 2878 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2879 2880 poll_thread_times(0, 3); 2881 2882 CU_ASSERT(nvme_qpair1->qpair == NULL); 2883 CU_ASSERT(nvme_ctrlr->resetting == true); 2884 2885 write_io->internal.f.in_submit_request = true; 2886 2887 bdev_nvme_submit_request(ch1, write_io); 2888 2889 CU_ASSERT(write_io->internal.f.in_submit_request == true); 2890 CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list))); 2891 2892 /* Aborting the queued write request should succeed immediately. */ 2893 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2894 abort_io->u.abort.bio_to_abort = write_io; 2895 abort_io->internal.f.in_submit_request = true; 2896 2897 bdev_nvme_submit_request(ch1, abort_io); 2898 2899 CU_ASSERT(abort_io->internal.f.in_submit_request == false); 2900 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2901 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2902 CU_ASSERT(write_io->internal.f.in_submit_request == false); 2903 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2904 2905 poll_threads(); 2906 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2907 poll_threads(); 2908 2909 spdk_put_io_channel(ch1); 2910 2911 set_thread(1); 2912 2913 spdk_put_io_channel(ch2); 2914 2915 poll_threads(); 2916 2917 free(write_io); 2918 free(fuse_io); 2919 free(admin_io); 2920 free(abort_io); 2921 2922 set_thread(1); 2923 2924 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2925 CU_ASSERT(rc == 0); 2926 2927 poll_threads(); 2928 spdk_delay_us(1000); 2929 poll_threads(); 2930 2931 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2932 } 2933 2934 static void 2935 test_get_io_qpair(void) 2936 { 2937 struct spdk_nvme_transport_id trid = {}; 2938 struct spdk_nvme_ctrlr ctrlr = {}; 2939 struct nvme_ctrlr *nvme_ctrlr = NULL; 2940 struct spdk_io_channel *ch; 2941 struct nvme_ctrlr_channel *ctrlr_ch; 2942 struct spdk_nvme_qpair *qpair; 2943 int rc; 2944 2945 ut_init_trid(&trid); 2946 TAILQ_INIT(&ctrlr.active_io_qpairs); 2947 2948 set_thread(0); 2949 2950 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2951 CU_ASSERT(rc == 0); 2952 2953 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2954 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2955 2956 ch = spdk_get_io_channel(nvme_ctrlr); 2957 SPDK_CU_ASSERT_FATAL(ch != NULL); 2958 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2959 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2960 2961 qpair = bdev_nvme_get_io_qpair(ch); 2962 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2963 2964 spdk_put_io_channel(ch); 2965 2966 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 2967 CU_ASSERT(rc == 0); 2968 2969 poll_threads(); 2970 spdk_delay_us(1000); 2971 poll_threads(); 2972 2973 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2974 } 2975 2976 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2977 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2978 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2979 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2980 */ 2981 static void 2982 test_bdev_unregister(void) 2983 { 2984 struct spdk_nvme_transport_id trid = {}; 2985 struct spdk_nvme_ctrlr *ctrlr; 2986 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 2987 struct nvme_ctrlr *nvme_ctrlr; 2988 struct nvme_ns *nvme_ns1, *nvme_ns2; 2989 const int STRING_SIZE = 32; 2990 const char *attached_names[STRING_SIZE]; 2991 struct nvme_bdev *bdev1, *bdev2; 2992 int rc; 2993 2994 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2995 ut_init_trid(&trid); 2996 2997 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2998 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2999 3000 g_ut_attach_ctrlr_status = 0; 3001 g_ut_attach_bdev_count = 2; 3002 3003 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3004 attach_ctrlr_done, NULL, &opts, NULL, false); 3005 CU_ASSERT(rc == 0); 3006 3007 spdk_delay_us(1000); 3008 poll_threads(); 3009 3010 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3011 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3012 3013 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 3014 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3015 3016 bdev1 = nvme_ns1->bdev; 3017 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3018 3019 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 3020 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3021 3022 bdev2 = nvme_ns2->bdev; 3023 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3024 3025 bdev_nvme_destruct(&bdev1->disk); 3026 bdev_nvme_destruct(&bdev2->disk); 3027 3028 poll_threads(); 3029 3030 CU_ASSERT(nvme_ns1->bdev == NULL); 3031 CU_ASSERT(nvme_ns2->bdev == NULL); 3032 3033 nvme_ctrlr->destruct = true; 3034 _nvme_ctrlr_destruct(nvme_ctrlr); 3035 3036 poll_threads(); 3037 spdk_delay_us(1000); 3038 poll_threads(); 3039 3040 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3041 } 3042 3043 static void 3044 test_compare_ns(void) 3045 { 3046 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 3047 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 3048 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 3049 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 3050 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 3051 3052 /* No IDs are defined. */ 3053 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3054 3055 /* Only EUI64 are defined and not matched. */ 3056 nsdata1.eui64 = 0xABCDEF0123456789; 3057 nsdata2.eui64 = 0xBBCDEF0123456789; 3058 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3059 3060 /* Only EUI64 are defined and matched. */ 3061 nsdata2.eui64 = 0xABCDEF0123456789; 3062 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3063 3064 /* Only NGUID are defined and not matched. */ 3065 nsdata1.eui64 = 0x0; 3066 nsdata2.eui64 = 0x0; 3067 nsdata1.nguid[0] = 0x12; 3068 nsdata2.nguid[0] = 0x10; 3069 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3070 3071 /* Only NGUID are defined and matched. */ 3072 nsdata2.nguid[0] = 0x12; 3073 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3074 3075 /* Only UUID are defined and not matched. */ 3076 nsdata1.nguid[0] = 0x0; 3077 nsdata2.nguid[0] = 0x0; 3078 ns1.uuid = &uuid1; 3079 ns2.uuid = &uuid2; 3080 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3081 3082 /* Only one UUID is defined. */ 3083 ns1.uuid = NULL; 3084 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3085 3086 /* Only UUID are defined and matched. */ 3087 ns1.uuid = &uuid2; 3088 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3089 3090 /* All EUI64, NGUID, and UUID are defined and matched. */ 3091 nsdata1.eui64 = 0x123456789ABCDEF; 3092 nsdata2.eui64 = 0x123456789ABCDEF; 3093 nsdata1.nguid[15] = 0x34; 3094 nsdata2.nguid[15] = 0x34; 3095 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 3096 3097 /* CSI are not matched. */ 3098 ns1.csi = SPDK_NVME_CSI_ZNS; 3099 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 3100 } 3101 3102 static void 3103 test_init_ana_log_page(void) 3104 { 3105 struct spdk_nvme_transport_id trid = {}; 3106 struct spdk_nvme_ctrlr *ctrlr; 3107 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3108 struct nvme_ctrlr *nvme_ctrlr; 3109 const int STRING_SIZE = 32; 3110 const char *attached_names[STRING_SIZE]; 3111 int rc; 3112 3113 set_thread(0); 3114 3115 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3116 ut_init_trid(&trid); 3117 3118 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 3119 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3120 3121 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 3122 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 3123 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 3124 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 3125 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 3126 3127 g_ut_attach_ctrlr_status = 0; 3128 g_ut_attach_bdev_count = 5; 3129 3130 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3131 attach_ctrlr_done, NULL, &opts, NULL, false); 3132 CU_ASSERT(rc == 0); 3133 3134 spdk_delay_us(1000); 3135 poll_threads(); 3136 3137 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3138 poll_threads(); 3139 3140 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3141 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3142 3143 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3144 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3145 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3146 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3147 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3148 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3149 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3150 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3151 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3152 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3153 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3154 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3155 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3156 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3157 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3158 3159 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3160 CU_ASSERT(rc == 0); 3161 3162 poll_threads(); 3163 spdk_delay_us(1000); 3164 poll_threads(); 3165 3166 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3167 } 3168 3169 static void 3170 init_accel(void) 3171 { 3172 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3173 sizeof(int), "accel_p"); 3174 } 3175 3176 static void 3177 fini_accel(void) 3178 { 3179 spdk_io_device_unregister(g_accel_p, NULL); 3180 } 3181 3182 static void 3183 test_get_memory_domains(void) 3184 { 3185 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3186 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3187 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3188 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3189 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3190 struct spdk_memory_domain *domains[4] = {}; 3191 int rc = 0; 3192 3193 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3194 3195 /* nvme controller doesn't have memory domains */ 3196 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3197 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3198 CU_ASSERT(rc == 0); 3199 CU_ASSERT(domains[0] == NULL); 3200 CU_ASSERT(domains[1] == NULL); 3201 3202 /* nvme controller has a memory domain */ 3203 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3204 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3205 CU_ASSERT(rc == 1); 3206 CU_ASSERT(domains[0] != NULL); 3207 memset(domains, 0, sizeof(domains)); 3208 3209 /* multipath, 2 controllers report 1 memory domain each */ 3210 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3211 3212 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3213 CU_ASSERT(rc == 2); 3214 CU_ASSERT(domains[0] != NULL); 3215 CU_ASSERT(domains[1] != NULL); 3216 memset(domains, 0, sizeof(domains)); 3217 3218 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3219 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3220 CU_ASSERT(rc == 2); 3221 3222 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3223 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3224 CU_ASSERT(rc == 2); 3225 CU_ASSERT(domains[0] == NULL); 3226 CU_ASSERT(domains[1] == NULL); 3227 3228 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3229 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3230 CU_ASSERT(rc == 2); 3231 CU_ASSERT(domains[0] != NULL); 3232 CU_ASSERT(domains[1] == NULL); 3233 memset(domains, 0, sizeof(domains)); 3234 3235 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3236 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3237 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3238 CU_ASSERT(rc == 4); 3239 CU_ASSERT(domains[0] != NULL); 3240 CU_ASSERT(domains[1] != NULL); 3241 CU_ASSERT(domains[2] != NULL); 3242 CU_ASSERT(domains[3] != NULL); 3243 memset(domains, 0, sizeof(domains)); 3244 3245 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3246 * Array size is less than the number of memory domains */ 3247 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3248 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3249 CU_ASSERT(rc == 4); 3250 CU_ASSERT(domains[0] != NULL); 3251 CU_ASSERT(domains[1] != NULL); 3252 CU_ASSERT(domains[2] != NULL); 3253 CU_ASSERT(domains[3] == NULL); 3254 memset(domains, 0, sizeof(domains)); 3255 3256 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3257 } 3258 3259 static void 3260 test_reconnect_qpair(void) 3261 { 3262 struct spdk_nvme_transport_id trid = {}; 3263 struct spdk_nvme_ctrlr *ctrlr; 3264 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3265 struct nvme_ctrlr *nvme_ctrlr; 3266 const int STRING_SIZE = 32; 3267 const char *attached_names[STRING_SIZE]; 3268 struct nvme_bdev *bdev; 3269 struct spdk_io_channel *ch1, *ch2; 3270 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3271 struct nvme_io_path *io_path1, *io_path2; 3272 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3273 int rc; 3274 3275 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3276 ut_init_trid(&trid); 3277 3278 set_thread(0); 3279 3280 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3281 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3282 3283 g_ut_attach_ctrlr_status = 0; 3284 g_ut_attach_bdev_count = 1; 3285 3286 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3287 attach_ctrlr_done, NULL, &opts, NULL, false); 3288 CU_ASSERT(rc == 0); 3289 3290 spdk_delay_us(1000); 3291 poll_threads(); 3292 3293 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3294 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3295 3296 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3297 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3298 3299 ch1 = spdk_get_io_channel(bdev); 3300 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3301 3302 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3303 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3304 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3305 nvme_qpair1 = io_path1->qpair; 3306 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3307 3308 set_thread(1); 3309 3310 ch2 = spdk_get_io_channel(bdev); 3311 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3312 3313 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3314 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3315 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3316 nvme_qpair2 = io_path2->qpair; 3317 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3318 3319 /* If a qpair is disconnected, it is freed and then reconnected via 3320 * resetting the corresponding nvme_ctrlr. 3321 */ 3322 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3323 ctrlr->is_failed = true; 3324 3325 poll_thread_times(1, 3); 3326 CU_ASSERT(nvme_qpair1->qpair != NULL); 3327 CU_ASSERT(nvme_qpair2->qpair == NULL); 3328 CU_ASSERT(nvme_ctrlr->resetting == true); 3329 3330 poll_thread_times(0, 3); 3331 CU_ASSERT(nvme_qpair1->qpair == NULL); 3332 CU_ASSERT(nvme_qpair2->qpair == NULL); 3333 CU_ASSERT(ctrlr->is_failed == true); 3334 3335 poll_thread_times(1, 2); 3336 poll_thread_times(0, 1); 3337 CU_ASSERT(ctrlr->is_failed == false); 3338 CU_ASSERT(ctrlr->adminq.is_connected == false); 3339 3340 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3341 poll_thread_times(0, 2); 3342 CU_ASSERT(ctrlr->adminq.is_connected == true); 3343 3344 poll_thread_times(0, 1); 3345 poll_thread_times(1, 1); 3346 CU_ASSERT(nvme_qpair1->qpair != NULL); 3347 CU_ASSERT(nvme_qpair2->qpair != NULL); 3348 CU_ASSERT(nvme_ctrlr->resetting == true); 3349 3350 poll_thread_times(0, 2); 3351 poll_thread_times(1, 1); 3352 poll_thread_times(0, 1); 3353 CU_ASSERT(nvme_ctrlr->resetting == false); 3354 3355 poll_threads(); 3356 3357 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3358 * fails, the qpair is just freed. 3359 */ 3360 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3361 ctrlr->is_failed = true; 3362 ctrlr->fail_reset = true; 3363 3364 poll_thread_times(1, 3); 3365 CU_ASSERT(nvme_qpair1->qpair != NULL); 3366 CU_ASSERT(nvme_qpair2->qpair == NULL); 3367 CU_ASSERT(nvme_ctrlr->resetting == true); 3368 3369 poll_thread_times(0, 3); 3370 poll_thread_times(1, 1); 3371 CU_ASSERT(nvme_qpair1->qpair == NULL); 3372 CU_ASSERT(nvme_qpair2->qpair == NULL); 3373 CU_ASSERT(ctrlr->is_failed == true); 3374 3375 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3376 poll_thread_times(0, 3); 3377 poll_thread_times(1, 1); 3378 poll_thread_times(0, 1); 3379 CU_ASSERT(ctrlr->is_failed == true); 3380 CU_ASSERT(nvme_ctrlr->resetting == false); 3381 CU_ASSERT(nvme_qpair1->qpair == NULL); 3382 CU_ASSERT(nvme_qpair2->qpair == NULL); 3383 3384 poll_threads(); 3385 3386 spdk_put_io_channel(ch2); 3387 3388 set_thread(0); 3389 3390 spdk_put_io_channel(ch1); 3391 3392 poll_threads(); 3393 3394 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3395 CU_ASSERT(rc == 0); 3396 3397 poll_threads(); 3398 spdk_delay_us(1000); 3399 poll_threads(); 3400 3401 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3402 } 3403 3404 static void 3405 test_create_bdev_ctrlr(void) 3406 { 3407 struct nvme_path_id path1 = {}, path2 = {}; 3408 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3409 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3410 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3411 const int STRING_SIZE = 32; 3412 const char *attached_names[STRING_SIZE]; 3413 int rc; 3414 3415 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3416 ut_init_trid(&path1.trid); 3417 ut_init_trid2(&path2.trid); 3418 3419 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3420 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3421 3422 g_ut_attach_ctrlr_status = 0; 3423 g_ut_attach_bdev_count = 0; 3424 3425 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3426 attach_ctrlr_done, NULL, &opts, NULL, true); 3427 CU_ASSERT(rc == 0); 3428 3429 spdk_delay_us(1000); 3430 poll_threads(); 3431 3432 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3433 poll_threads(); 3434 3435 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3436 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3437 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3438 3439 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3440 g_ut_attach_ctrlr_status = -EINVAL; 3441 3442 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3443 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3444 3445 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3446 3447 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3448 attach_ctrlr_done, NULL, &opts, NULL, true); 3449 CU_ASSERT(rc == 0); 3450 3451 spdk_delay_us(1000); 3452 poll_threads(); 3453 3454 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3455 poll_threads(); 3456 3457 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3458 3459 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3460 g_ut_attach_ctrlr_status = 0; 3461 3462 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3463 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3464 3465 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3466 attach_ctrlr_done, NULL, &opts, NULL, true); 3467 CU_ASSERT(rc == 0); 3468 3469 spdk_delay_us(1000); 3470 poll_threads(); 3471 3472 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3473 poll_threads(); 3474 3475 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3476 3477 /* Delete two ctrlrs at once. */ 3478 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3479 CU_ASSERT(rc == 0); 3480 3481 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3482 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3483 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3484 3485 poll_threads(); 3486 spdk_delay_us(1000); 3487 poll_threads(); 3488 3489 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3490 3491 /* Add two ctrlrs and delete one by one. */ 3492 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3493 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3494 3495 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3496 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3497 3498 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3499 attach_ctrlr_done, NULL, &opts, NULL, true); 3500 CU_ASSERT(rc == 0); 3501 3502 spdk_delay_us(1000); 3503 poll_threads(); 3504 3505 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3506 poll_threads(); 3507 3508 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3509 attach_ctrlr_done, NULL, &opts, NULL, true); 3510 CU_ASSERT(rc == 0); 3511 3512 spdk_delay_us(1000); 3513 poll_threads(); 3514 3515 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3516 poll_threads(); 3517 3518 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3519 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3520 3521 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3522 CU_ASSERT(rc == 0); 3523 3524 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3525 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) != NULL); 3526 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3527 3528 poll_threads(); 3529 spdk_delay_us(1000); 3530 poll_threads(); 3531 3532 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3533 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3534 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3535 3536 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3537 CU_ASSERT(rc == 0); 3538 3539 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3540 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3541 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) != NULL); 3542 3543 poll_threads(); 3544 spdk_delay_us(1000); 3545 poll_threads(); 3546 3547 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3548 } 3549 3550 static struct nvme_ns * 3551 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3552 { 3553 struct nvme_ns *nvme_ns; 3554 3555 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3556 if (nvme_ns->ctrlr == nvme_ctrlr) { 3557 return nvme_ns; 3558 } 3559 } 3560 3561 return NULL; 3562 } 3563 3564 static void 3565 test_add_multi_ns_to_bdev(void) 3566 { 3567 struct nvme_path_id path1 = {}, path2 = {}; 3568 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3569 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3570 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3571 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3572 struct nvme_ns *nvme_ns1, *nvme_ns2; 3573 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3574 const int STRING_SIZE = 32; 3575 const char *attached_names[STRING_SIZE]; 3576 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3577 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3578 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3579 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3580 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3581 int rc; 3582 3583 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3584 ut_init_trid(&path1.trid); 3585 ut_init_trid2(&path2.trid); 3586 3587 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3588 3589 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3590 * namespaces are populated. 3591 */ 3592 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3593 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3594 3595 ctrlr1->ns[1].is_active = false; 3596 ctrlr1->ns[4].is_active = false; 3597 ctrlr1->ns[0].uuid = &uuid1; 3598 ctrlr1->ns[2].uuid = &uuid3; 3599 ctrlr1->ns[3].uuid = &uuid4; 3600 3601 g_ut_attach_ctrlr_status = 0; 3602 g_ut_attach_bdev_count = 3; 3603 3604 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3605 attach_ctrlr_done, NULL, &opts, NULL, true); 3606 CU_ASSERT(rc == 0); 3607 3608 spdk_delay_us(1000); 3609 poll_threads(); 3610 3611 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3612 poll_threads(); 3613 3614 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3615 * namespaces are populated. The uuid of 4th namespace is different, and hence 3616 * adding 4th namespace to a bdev should fail. 3617 */ 3618 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3619 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3620 3621 ctrlr2->ns[2].is_active = false; 3622 ctrlr2->ns[4].is_active = false; 3623 ctrlr2->ns[0].uuid = &uuid1; 3624 ctrlr2->ns[1].uuid = &uuid2; 3625 ctrlr2->ns[3].uuid = &uuid44; 3626 3627 g_ut_attach_ctrlr_status = 0; 3628 g_ut_attach_bdev_count = 2; 3629 3630 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3631 attach_ctrlr_done, NULL, &opts, NULL, true); 3632 CU_ASSERT(rc == 0); 3633 3634 spdk_delay_us(1000); 3635 poll_threads(); 3636 3637 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3638 poll_threads(); 3639 3640 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3641 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3642 3643 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3644 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3645 3646 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3647 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3648 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3649 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3650 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3651 3652 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3653 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3654 3655 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3656 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3657 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3658 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3659 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3660 3661 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3662 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3663 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3664 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3665 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3666 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3667 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3668 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3669 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3670 3671 CU_ASSERT(bdev1->ref == 2); 3672 CU_ASSERT(bdev2->ref == 1); 3673 CU_ASSERT(bdev3->ref == 1); 3674 CU_ASSERT(bdev4->ref == 1); 3675 3676 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3677 rc = bdev_nvme_delete("nvme0", &path1, NULL, NULL); 3678 CU_ASSERT(rc == 0); 3679 3680 poll_threads(); 3681 spdk_delay_us(1000); 3682 poll_threads(); 3683 3684 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3685 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == NULL); 3686 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == nvme_ctrlr2); 3687 3688 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3689 CU_ASSERT(rc == 0); 3690 3691 poll_threads(); 3692 spdk_delay_us(1000); 3693 poll_threads(); 3694 3695 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3696 3697 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3698 * can be deleted when the bdev subsystem shutdown. 3699 */ 3700 g_ut_attach_bdev_count = 1; 3701 3702 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3703 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3704 3705 ctrlr1->ns[0].uuid = &uuid1; 3706 3707 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3708 attach_ctrlr_done, NULL, &opts, NULL, true); 3709 CU_ASSERT(rc == 0); 3710 3711 spdk_delay_us(1000); 3712 poll_threads(); 3713 3714 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3715 poll_threads(); 3716 3717 ut_init_trid2(&path2.trid); 3718 3719 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3720 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3721 3722 ctrlr2->ns[0].uuid = &uuid1; 3723 3724 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3725 attach_ctrlr_done, NULL, &opts, NULL, true); 3726 CU_ASSERT(rc == 0); 3727 3728 spdk_delay_us(1000); 3729 poll_threads(); 3730 3731 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3732 poll_threads(); 3733 3734 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3735 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3736 3737 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3738 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3739 3740 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3741 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3742 3743 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3744 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3745 3746 /* Check if a nvme_bdev has two nvme_ns. */ 3747 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3748 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3749 CU_ASSERT(nvme_ns1->bdev == bdev1); 3750 3751 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3752 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3753 CU_ASSERT(nvme_ns2->bdev == bdev1); 3754 3755 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3756 bdev_nvme_destruct(&bdev1->disk); 3757 3758 poll_threads(); 3759 3760 CU_ASSERT(nvme_ns1->bdev == NULL); 3761 CU_ASSERT(nvme_ns2->bdev == NULL); 3762 3763 nvme_ctrlr1->destruct = true; 3764 _nvme_ctrlr_destruct(nvme_ctrlr1); 3765 3766 poll_threads(); 3767 spdk_delay_us(1000); 3768 poll_threads(); 3769 3770 nvme_ctrlr2->destruct = true; 3771 _nvme_ctrlr_destruct(nvme_ctrlr2); 3772 3773 poll_threads(); 3774 spdk_delay_us(1000); 3775 poll_threads(); 3776 3777 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3778 } 3779 3780 static void 3781 test_add_multi_io_paths_to_nbdev_ch(void) 3782 { 3783 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3784 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3785 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3786 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3787 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3788 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3789 const int STRING_SIZE = 32; 3790 const char *attached_names[STRING_SIZE]; 3791 struct nvme_bdev *bdev; 3792 struct spdk_io_channel *ch; 3793 struct nvme_bdev_channel *nbdev_ch; 3794 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3795 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3796 int rc; 3797 3798 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3799 ut_init_trid(&path1.trid); 3800 ut_init_trid2(&path2.trid); 3801 ut_init_trid3(&path3.trid); 3802 g_ut_attach_ctrlr_status = 0; 3803 g_ut_attach_bdev_count = 1; 3804 3805 set_thread(1); 3806 3807 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3808 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3809 3810 ctrlr1->ns[0].uuid = &uuid1; 3811 3812 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3813 attach_ctrlr_done, NULL, &opts, NULL, true); 3814 CU_ASSERT(rc == 0); 3815 3816 spdk_delay_us(1000); 3817 poll_threads(); 3818 3819 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3820 poll_threads(); 3821 3822 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3823 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3824 3825 ctrlr2->ns[0].uuid = &uuid1; 3826 3827 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3828 attach_ctrlr_done, NULL, &opts, NULL, true); 3829 CU_ASSERT(rc == 0); 3830 3831 spdk_delay_us(1000); 3832 poll_threads(); 3833 3834 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3835 poll_threads(); 3836 3837 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3838 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3839 3840 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 3841 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3842 3843 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 3844 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3845 3846 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3847 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3848 3849 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3850 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3851 3852 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3853 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3854 3855 set_thread(0); 3856 3857 ch = spdk_get_io_channel(bdev); 3858 SPDK_CU_ASSERT_FATAL(ch != NULL); 3859 nbdev_ch = spdk_io_channel_get_ctx(ch); 3860 3861 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3862 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3863 3864 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3865 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3866 3867 set_thread(1); 3868 3869 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3870 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3871 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3872 3873 ctrlr3->ns[0].uuid = &uuid1; 3874 3875 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3876 attach_ctrlr_done, NULL, &opts, NULL, true); 3877 CU_ASSERT(rc == 0); 3878 3879 spdk_delay_us(1000); 3880 poll_threads(); 3881 3882 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3883 poll_threads(); 3884 3885 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn); 3886 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3887 3888 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3889 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3890 3891 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3892 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3893 3894 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3895 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 3896 CU_ASSERT(rc == 0); 3897 3898 poll_threads(); 3899 spdk_delay_us(1000); 3900 poll_threads(); 3901 3902 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn) == nvme_ctrlr1); 3903 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 3904 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid, opts.hostnqn) == nvme_ctrlr3); 3905 3906 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3907 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3908 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3909 3910 set_thread(0); 3911 3912 spdk_put_io_channel(ch); 3913 3914 poll_threads(); 3915 3916 set_thread(1); 3917 3918 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 3919 CU_ASSERT(rc == 0); 3920 3921 poll_threads(); 3922 spdk_delay_us(1000); 3923 poll_threads(); 3924 3925 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3926 } 3927 3928 static void 3929 test_admin_path(void) 3930 { 3931 struct nvme_path_id path1 = {}, path2 = {}; 3932 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3933 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 3934 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3935 const int STRING_SIZE = 32; 3936 const char *attached_names[STRING_SIZE]; 3937 struct nvme_bdev *bdev; 3938 struct spdk_io_channel *ch; 3939 struct spdk_bdev_io *bdev_io; 3940 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3941 int rc; 3942 3943 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3944 ut_init_trid(&path1.trid); 3945 ut_init_trid2(&path2.trid); 3946 g_ut_attach_ctrlr_status = 0; 3947 g_ut_attach_bdev_count = 1; 3948 3949 set_thread(0); 3950 3951 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3952 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3953 3954 ctrlr1->ns[0].uuid = &uuid1; 3955 3956 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3957 attach_ctrlr_done, NULL, &opts, NULL, true); 3958 CU_ASSERT(rc == 0); 3959 3960 spdk_delay_us(1000); 3961 poll_threads(); 3962 3963 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3964 poll_threads(); 3965 3966 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3967 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3968 3969 ctrlr2->ns[0].uuid = &uuid1; 3970 3971 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3972 attach_ctrlr_done, NULL, &opts, NULL, true); 3973 CU_ASSERT(rc == 0); 3974 3975 spdk_delay_us(1000); 3976 poll_threads(); 3977 3978 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3979 poll_threads(); 3980 3981 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3982 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3983 3984 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3985 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3986 3987 ch = spdk_get_io_channel(bdev); 3988 SPDK_CU_ASSERT_FATAL(ch != NULL); 3989 3990 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3991 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3992 3993 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3994 * submitted to ctrlr2. 3995 */ 3996 ctrlr1->is_failed = true; 3997 bdev_io->internal.f.in_submit_request = true; 3998 3999 bdev_nvme_submit_request(ch, bdev_io); 4000 4001 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 4002 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 4003 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4004 4005 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4006 poll_threads(); 4007 4008 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 4009 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4010 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4011 4012 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 4013 ctrlr2->is_failed = true; 4014 bdev_io->internal.f.in_submit_request = true; 4015 4016 bdev_nvme_submit_request(ch, bdev_io); 4017 4018 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 4019 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 4020 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4021 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 4022 4023 free(bdev_io); 4024 4025 spdk_put_io_channel(ch); 4026 4027 poll_threads(); 4028 4029 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4030 CU_ASSERT(rc == 0); 4031 4032 poll_threads(); 4033 spdk_delay_us(1000); 4034 poll_threads(); 4035 4036 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4037 } 4038 4039 static struct nvme_io_path * 4040 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 4041 struct nvme_ctrlr *nvme_ctrlr) 4042 { 4043 struct nvme_io_path *io_path; 4044 4045 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 4046 if (io_path->qpair->ctrlr == nvme_ctrlr) { 4047 return io_path; 4048 } 4049 } 4050 4051 return NULL; 4052 } 4053 4054 static void 4055 test_reset_bdev_ctrlr(void) 4056 { 4057 struct nvme_path_id path1 = {}, path2 = {}; 4058 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4059 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4060 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4061 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4062 struct nvme_path_id *curr_path1, *curr_path2; 4063 const int STRING_SIZE = 32; 4064 const char *attached_names[STRING_SIZE]; 4065 struct nvme_bdev *bdev; 4066 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 4067 struct nvme_bdev_io *first_bio; 4068 struct spdk_io_channel *ch1, *ch2; 4069 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 4070 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 4071 int rc; 4072 4073 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4074 ut_init_trid(&path1.trid); 4075 ut_init_trid2(&path2.trid); 4076 g_ut_attach_ctrlr_status = 0; 4077 g_ut_attach_bdev_count = 1; 4078 4079 set_thread(0); 4080 4081 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4082 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4083 4084 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4085 attach_ctrlr_done, NULL, &opts, NULL, true); 4086 CU_ASSERT(rc == 0); 4087 4088 spdk_delay_us(1000); 4089 poll_threads(); 4090 4091 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4092 poll_threads(); 4093 4094 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4095 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4096 4097 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4098 attach_ctrlr_done, NULL, &opts, NULL, true); 4099 CU_ASSERT(rc == 0); 4100 4101 spdk_delay_us(1000); 4102 poll_threads(); 4103 4104 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4105 poll_threads(); 4106 4107 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4108 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4109 4110 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4111 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 4112 4113 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 4114 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 4115 4116 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4117 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 4118 4119 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 4120 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 4121 4122 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4123 SPDK_CU_ASSERT_FATAL(bdev != NULL); 4124 4125 set_thread(0); 4126 4127 ch1 = spdk_get_io_channel(bdev); 4128 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 4129 4130 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 4131 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 4132 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 4133 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 4134 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 4135 4136 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4137 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4138 4139 set_thread(1); 4140 4141 ch2 = spdk_get_io_channel(bdev); 4142 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4143 4144 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4145 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4146 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4147 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4148 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4149 4150 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4151 4152 /* The first reset request from bdev_io is submitted on thread 0. 4153 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4154 * 4155 * A few extra polls are necessary after resetting ctrlr1 to check 4156 * pending reset requests for ctrlr1. 4157 */ 4158 ctrlr1->is_failed = true; 4159 curr_path1->last_failed_tsc = spdk_get_ticks(); 4160 ctrlr2->is_failed = true; 4161 curr_path2->last_failed_tsc = spdk_get_ticks(); 4162 4163 set_thread(0); 4164 4165 bdev_nvme_submit_request(ch1, first_bdev_io); 4166 4167 poll_thread_times(0, 1); 4168 poll_thread_times(1, 1); 4169 poll_thread_times(0, 2); 4170 poll_thread_times(1, 1); 4171 poll_thread_times(0, 1); 4172 4173 CU_ASSERT(first_bio->io_path == io_path11); 4174 CU_ASSERT(nvme_ctrlr1->resetting == true); 4175 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4176 4177 poll_thread_times(0, 3); 4178 CU_ASSERT(io_path11->qpair->qpair == NULL); 4179 CU_ASSERT(io_path21->qpair->qpair != NULL); 4180 4181 poll_thread_times(1, 2); 4182 CU_ASSERT(io_path11->qpair->qpair == NULL); 4183 CU_ASSERT(io_path21->qpair->qpair == NULL); 4184 CU_ASSERT(ctrlr1->is_failed == true); 4185 4186 poll_thread_times(0, 1); 4187 CU_ASSERT(nvme_ctrlr1->resetting == true); 4188 CU_ASSERT(ctrlr1->is_failed == false); 4189 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4190 CU_ASSERT(curr_path1->last_failed_tsc != 0); 4191 4192 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4193 poll_thread_times(0, 2); 4194 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4195 4196 poll_thread_times(0, 1); 4197 CU_ASSERT(io_path11->qpair->qpair != NULL); 4198 CU_ASSERT(io_path21->qpair->qpair == NULL); 4199 4200 poll_thread_times(1, 1); 4201 CU_ASSERT(io_path11->qpair->qpair != NULL); 4202 CU_ASSERT(io_path21->qpair->qpair != NULL); 4203 4204 poll_thread_times(0, 2); 4205 CU_ASSERT(nvme_ctrlr1->resetting == true); 4206 poll_thread_times(1, 1); 4207 CU_ASSERT(nvme_ctrlr1->resetting == true); 4208 poll_thread_times(0, 2); 4209 CU_ASSERT(nvme_ctrlr1->resetting == false); 4210 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4211 CU_ASSERT(first_bio->io_path == io_path12); 4212 CU_ASSERT(nvme_ctrlr2->resetting == true); 4213 4214 poll_thread_times(0, 3); 4215 CU_ASSERT(io_path12->qpair->qpair == NULL); 4216 CU_ASSERT(io_path22->qpair->qpair != NULL); 4217 4218 poll_thread_times(1, 2); 4219 CU_ASSERT(io_path12->qpair->qpair == NULL); 4220 CU_ASSERT(io_path22->qpair->qpair == NULL); 4221 CU_ASSERT(ctrlr2->is_failed == true); 4222 4223 poll_thread_times(0, 1); 4224 CU_ASSERT(nvme_ctrlr2->resetting == true); 4225 CU_ASSERT(ctrlr2->is_failed == false); 4226 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4227 CU_ASSERT(curr_path2->last_failed_tsc != 0); 4228 4229 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4230 poll_thread_times(0, 2); 4231 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4232 4233 poll_thread_times(0, 1); 4234 CU_ASSERT(io_path12->qpair->qpair != NULL); 4235 CU_ASSERT(io_path22->qpair->qpair == NULL); 4236 4237 poll_thread_times(1, 2); 4238 CU_ASSERT(io_path12->qpair->qpair != NULL); 4239 CU_ASSERT(io_path22->qpair->qpair != NULL); 4240 4241 poll_thread_times(0, 2); 4242 CU_ASSERT(nvme_ctrlr2->resetting == true); 4243 poll_thread_times(1, 1); 4244 CU_ASSERT(nvme_ctrlr2->resetting == true); 4245 poll_thread_times(0, 2); 4246 CU_ASSERT(first_bio->io_path == NULL); 4247 CU_ASSERT(nvme_ctrlr2->resetting == false); 4248 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4249 4250 poll_threads(); 4251 4252 /* There is a race between two reset requests from bdev_io. 4253 * 4254 * The first reset request is submitted on thread 0, and the second reset 4255 * request is submitted on thread 1 while the first is resetting ctrlr1. 4256 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4257 * both reset requests go to ctrlr2. The first comes earlier than the second. 4258 * The second is pending on ctrlr2 again. After the first completes resetting 4259 * ctrl2, both complete successfully. 4260 */ 4261 ctrlr1->is_failed = true; 4262 curr_path1->last_failed_tsc = spdk_get_ticks(); 4263 ctrlr2->is_failed = true; 4264 curr_path2->last_failed_tsc = spdk_get_ticks(); 4265 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4266 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4267 4268 set_thread(0); 4269 4270 bdev_nvme_submit_request(ch1, first_bdev_io); 4271 4272 set_thread(1); 4273 4274 bdev_nvme_submit_request(ch2, second_bdev_io); 4275 4276 poll_thread_times(0, 1); 4277 poll_thread_times(1, 1); 4278 poll_thread_times(0, 2); 4279 poll_thread_times(1, 1); 4280 poll_thread_times(0, 1); 4281 poll_thread_times(1, 1); 4282 4283 CU_ASSERT(nvme_ctrlr1->resetting == true); 4284 CU_ASSERT(nvme_ctrlr1->ctrlr_op_cb_arg == first_bio); 4285 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == 4286 (struct nvme_bdev_io *)second_bdev_io->driver_ctx); 4287 4288 poll_threads(); 4289 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4290 poll_threads(); 4291 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4292 poll_threads(); 4293 4294 CU_ASSERT(ctrlr1->is_failed == false); 4295 CU_ASSERT(curr_path1->last_failed_tsc == 0); 4296 CU_ASSERT(ctrlr2->is_failed == false); 4297 CU_ASSERT(curr_path2->last_failed_tsc == 0); 4298 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4299 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4300 4301 set_thread(0); 4302 4303 spdk_put_io_channel(ch1); 4304 4305 set_thread(1); 4306 4307 spdk_put_io_channel(ch2); 4308 4309 poll_threads(); 4310 4311 set_thread(0); 4312 4313 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4314 CU_ASSERT(rc == 0); 4315 4316 poll_threads(); 4317 spdk_delay_us(1000); 4318 poll_threads(); 4319 4320 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4321 4322 free(first_bdev_io); 4323 free(second_bdev_io); 4324 } 4325 4326 static void 4327 test_find_io_path(void) 4328 { 4329 struct nvme_bdev_channel nbdev_ch = { 4330 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4331 }; 4332 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4333 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4334 struct spdk_nvme_ns ns1 = {}, ns2 = {}; 4335 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4336 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4337 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4338 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4339 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }; 4340 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4341 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4342 4343 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4344 4345 /* Test if io_path whose ANA state is not accessible is excluded. */ 4346 4347 nvme_qpair1.qpair = &qpair1; 4348 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4349 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4350 4351 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4352 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4353 4354 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4355 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4356 4357 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4358 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4359 4360 nbdev_ch.current_io_path = NULL; 4361 4362 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4363 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4364 4365 nbdev_ch.current_io_path = NULL; 4366 4367 /* Test if io_path whose qpair is resetting is excluded. */ 4368 4369 nvme_qpair1.qpair = NULL; 4370 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4371 4372 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4373 4374 /* Test if ANA optimized state or the first found ANA non-optimized state 4375 * is prioritized. 4376 */ 4377 4378 nvme_qpair1.qpair = &qpair1; 4379 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4380 nvme_qpair2.qpair = &qpair2; 4381 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4382 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4383 4384 nbdev_ch.current_io_path = NULL; 4385 4386 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4387 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4388 4389 nbdev_ch.current_io_path = NULL; 4390 } 4391 4392 static void 4393 test_retry_io_if_ana_state_is_updating(void) 4394 { 4395 struct nvme_path_id path = {}; 4396 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 4397 struct spdk_nvme_ctrlr *ctrlr; 4398 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 4399 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4400 struct nvme_ctrlr *nvme_ctrlr; 4401 const int STRING_SIZE = 32; 4402 const char *attached_names[STRING_SIZE]; 4403 struct nvme_bdev *bdev; 4404 struct nvme_ns *nvme_ns; 4405 struct spdk_bdev_io *bdev_io1; 4406 struct spdk_io_channel *ch; 4407 struct nvme_bdev_channel *nbdev_ch; 4408 struct nvme_io_path *io_path; 4409 struct nvme_qpair *nvme_qpair; 4410 int rc; 4411 4412 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4413 ut_init_trid(&path.trid); 4414 4415 set_thread(0); 4416 4417 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4418 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4419 4420 g_ut_attach_ctrlr_status = 0; 4421 g_ut_attach_bdev_count = 1; 4422 4423 opts.ctrlr_loss_timeout_sec = -1; 4424 opts.reconnect_delay_sec = 1; 4425 4426 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4427 attach_ctrlr_done, NULL, &dopts, &opts, false); 4428 CU_ASSERT(rc == 0); 4429 4430 spdk_delay_us(1000); 4431 poll_threads(); 4432 4433 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4434 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4435 4436 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 4437 CU_ASSERT(nvme_ctrlr != NULL); 4438 4439 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4440 CU_ASSERT(bdev != NULL); 4441 4442 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4443 CU_ASSERT(nvme_ns != NULL); 4444 4445 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4446 ut_bdev_io_set_buf(bdev_io1); 4447 4448 ch = spdk_get_io_channel(bdev); 4449 SPDK_CU_ASSERT_FATAL(ch != NULL); 4450 4451 nbdev_ch = spdk_io_channel_get_ctx(ch); 4452 4453 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4454 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4455 4456 nvme_qpair = io_path->qpair; 4457 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4458 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4459 4460 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4461 4462 /* If qpair is connected, I/O should succeed. */ 4463 bdev_io1->internal.f.in_submit_request = true; 4464 4465 bdev_nvme_submit_request(ch, bdev_io1); 4466 CU_ASSERT(bdev_io1->internal.f.in_submit_request == true); 4467 4468 poll_threads(); 4469 CU_ASSERT(bdev_io1->internal.f.in_submit_request == false); 4470 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4471 4472 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4473 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4474 nbdev_ch->current_io_path = NULL; 4475 4476 bdev_io1->internal.f.in_submit_request = true; 4477 4478 bdev_nvme_submit_request(ch, bdev_io1); 4479 4480 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4481 CU_ASSERT(bdev_io1->internal.f.in_submit_request == true); 4482 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4483 4484 /* ANA state became accessible while I/O was queued. */ 4485 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4486 4487 spdk_delay_us(1000000); 4488 4489 poll_thread_times(0, 1); 4490 4491 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4492 CU_ASSERT(bdev_io1->internal.f.in_submit_request == true); 4493 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4494 4495 poll_threads(); 4496 4497 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4498 CU_ASSERT(bdev_io1->internal.f.in_submit_request == false); 4499 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4500 4501 free(bdev_io1); 4502 4503 spdk_put_io_channel(ch); 4504 4505 poll_threads(); 4506 4507 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4508 CU_ASSERT(rc == 0); 4509 4510 poll_threads(); 4511 spdk_delay_us(1000); 4512 poll_threads(); 4513 4514 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4515 } 4516 4517 static void 4518 test_retry_io_for_io_path_error(void) 4519 { 4520 struct nvme_path_id path1 = {}, path2 = {}; 4521 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4522 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4523 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4524 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4525 const int STRING_SIZE = 32; 4526 const char *attached_names[STRING_SIZE]; 4527 struct nvme_bdev *bdev; 4528 struct nvme_ns *nvme_ns1, *nvme_ns2; 4529 struct spdk_bdev_io *bdev_io; 4530 struct nvme_bdev_io *bio; 4531 struct spdk_io_channel *ch; 4532 struct nvme_bdev_channel *nbdev_ch; 4533 struct nvme_io_path *io_path1, *io_path2; 4534 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4535 struct ut_nvme_req *req; 4536 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4537 int rc; 4538 4539 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4540 ut_init_trid(&path1.trid); 4541 ut_init_trid2(&path2.trid); 4542 4543 g_opts.bdev_retry_count = 1; 4544 4545 set_thread(0); 4546 4547 g_ut_attach_ctrlr_status = 0; 4548 g_ut_attach_bdev_count = 1; 4549 4550 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4551 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4552 4553 ctrlr1->ns[0].uuid = &uuid1; 4554 4555 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4556 attach_ctrlr_done, NULL, &opts, NULL, true); 4557 CU_ASSERT(rc == 0); 4558 4559 spdk_delay_us(1000); 4560 poll_threads(); 4561 4562 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4563 poll_threads(); 4564 4565 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4566 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4567 4568 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 4569 CU_ASSERT(nvme_ctrlr1 != NULL); 4570 4571 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4572 CU_ASSERT(bdev != NULL); 4573 4574 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4575 CU_ASSERT(nvme_ns1 != NULL); 4576 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4577 4578 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4579 ut_bdev_io_set_buf(bdev_io); 4580 4581 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4582 4583 ch = spdk_get_io_channel(bdev); 4584 SPDK_CU_ASSERT_FATAL(ch != NULL); 4585 4586 nbdev_ch = spdk_io_channel_get_ctx(ch); 4587 4588 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4589 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4590 4591 nvme_qpair1 = io_path1->qpair; 4592 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4593 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4594 4595 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4596 4597 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4598 bdev_io->internal.f.in_submit_request = true; 4599 4600 bdev_nvme_submit_request(ch, bdev_io); 4601 4602 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4603 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4604 4605 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4606 SPDK_CU_ASSERT_FATAL(req != NULL); 4607 4608 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4609 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4610 req->cpl.status.dnr = 1; 4611 4612 poll_thread_times(0, 1); 4613 4614 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4615 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4616 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4617 4618 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4619 bdev_io->internal.f.in_submit_request = true; 4620 4621 bdev_nvme_submit_request(ch, bdev_io); 4622 4623 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4624 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4625 4626 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4627 SPDK_CU_ASSERT_FATAL(req != NULL); 4628 4629 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4630 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4631 4632 poll_thread_times(0, 1); 4633 4634 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4635 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4636 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4637 4638 poll_threads(); 4639 4640 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4641 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4642 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4643 4644 /* Add io_path2 dynamically, and create a multipath configuration. */ 4645 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4646 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4647 4648 ctrlr2->ns[0].uuid = &uuid1; 4649 4650 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4651 attach_ctrlr_done, NULL, &opts, NULL, true); 4652 CU_ASSERT(rc == 0); 4653 4654 spdk_delay_us(1000); 4655 poll_threads(); 4656 4657 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4658 poll_threads(); 4659 4660 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 4661 CU_ASSERT(nvme_ctrlr2 != NULL); 4662 4663 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4664 CU_ASSERT(nvme_ns2 != NULL); 4665 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4666 4667 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4668 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4669 4670 nvme_qpair2 = io_path2->qpair; 4671 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4672 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4673 4674 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4675 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4676 * So after a retry, I/O is submitted to io_path2 and should succeed. 4677 */ 4678 bdev_io->internal.f.in_submit_request = true; 4679 4680 bdev_nvme_submit_request(ch, bdev_io); 4681 4682 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4683 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4684 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4685 4686 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4687 SPDK_CU_ASSERT_FATAL(req != NULL); 4688 4689 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4690 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4691 4692 poll_thread_times(0, 1); 4693 4694 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4695 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4696 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4697 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4698 4699 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4700 nvme_qpair1->qpair = NULL; 4701 4702 poll_threads(); 4703 4704 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4705 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4706 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4707 4708 free(bdev_io); 4709 4710 spdk_put_io_channel(ch); 4711 4712 poll_threads(); 4713 4714 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4715 CU_ASSERT(rc == 0); 4716 4717 poll_threads(); 4718 spdk_delay_us(1000); 4719 poll_threads(); 4720 4721 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4722 4723 g_opts.bdev_retry_count = 0; 4724 } 4725 4726 static void 4727 test_retry_io_count(void) 4728 { 4729 struct nvme_path_id path = {}; 4730 struct spdk_nvme_ctrlr *ctrlr; 4731 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4732 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4733 struct nvme_ctrlr *nvme_ctrlr; 4734 const int STRING_SIZE = 32; 4735 const char *attached_names[STRING_SIZE]; 4736 struct nvme_bdev *bdev; 4737 struct nvme_ns *nvme_ns; 4738 struct spdk_bdev_io *bdev_io; 4739 struct nvme_bdev_io *bio; 4740 struct spdk_io_channel *ch; 4741 struct nvme_bdev_channel *nbdev_ch; 4742 struct nvme_io_path *io_path; 4743 struct nvme_qpair *nvme_qpair; 4744 struct ut_nvme_req *req; 4745 int rc; 4746 4747 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4748 ut_init_trid(&path.trid); 4749 4750 set_thread(0); 4751 4752 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4753 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4754 4755 g_ut_attach_ctrlr_status = 0; 4756 g_ut_attach_bdev_count = 1; 4757 4758 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4759 attach_ctrlr_done, NULL, &opts, NULL, false); 4760 CU_ASSERT(rc == 0); 4761 4762 spdk_delay_us(1000); 4763 poll_threads(); 4764 4765 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4766 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4767 4768 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 4769 CU_ASSERT(nvme_ctrlr != NULL); 4770 4771 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4772 CU_ASSERT(bdev != NULL); 4773 4774 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4775 CU_ASSERT(nvme_ns != NULL); 4776 4777 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4778 ut_bdev_io_set_buf(bdev_io); 4779 4780 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4781 4782 ch = spdk_get_io_channel(bdev); 4783 SPDK_CU_ASSERT_FATAL(ch != NULL); 4784 4785 nbdev_ch = spdk_io_channel_get_ctx(ch); 4786 4787 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4788 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4789 4790 nvme_qpair = io_path->qpair; 4791 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4792 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4793 4794 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4795 4796 /* If I/O is aborted by request, it should not be retried. */ 4797 g_opts.bdev_retry_count = 1; 4798 4799 bdev_io->internal.f.in_submit_request = true; 4800 4801 bdev_nvme_submit_request(ch, bdev_io); 4802 4803 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4804 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4805 4806 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4807 SPDK_CU_ASSERT_FATAL(req != NULL); 4808 4809 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4810 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4811 4812 poll_thread_times(0, 1); 4813 4814 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4815 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4816 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4817 4818 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4819 * the failed I/O should not be retried. 4820 */ 4821 g_opts.bdev_retry_count = 4; 4822 4823 bdev_io->internal.f.in_submit_request = true; 4824 4825 bdev_nvme_submit_request(ch, bdev_io); 4826 4827 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4828 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4829 4830 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4831 SPDK_CU_ASSERT_FATAL(req != NULL); 4832 4833 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4834 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4835 bio->retry_count = 4; 4836 4837 poll_thread_times(0, 1); 4838 4839 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4840 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4841 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4842 4843 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4844 g_opts.bdev_retry_count = -1; 4845 4846 bdev_io->internal.f.in_submit_request = true; 4847 4848 bdev_nvme_submit_request(ch, bdev_io); 4849 4850 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4851 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4852 4853 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4854 SPDK_CU_ASSERT_FATAL(req != NULL); 4855 4856 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4857 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4858 bio->retry_count = 4; 4859 4860 poll_thread_times(0, 1); 4861 4862 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4863 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4864 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4865 4866 poll_threads(); 4867 4868 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4869 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4870 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4871 4872 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4873 * the failed I/O should be retried. 4874 */ 4875 g_opts.bdev_retry_count = 4; 4876 4877 bdev_io->internal.f.in_submit_request = true; 4878 4879 bdev_nvme_submit_request(ch, bdev_io); 4880 4881 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4882 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4883 4884 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4885 SPDK_CU_ASSERT_FATAL(req != NULL); 4886 4887 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4888 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4889 bio->retry_count = 3; 4890 4891 poll_thread_times(0, 1); 4892 4893 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4894 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 4895 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 4896 4897 poll_threads(); 4898 4899 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4900 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 4901 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4902 4903 free(bdev_io); 4904 4905 spdk_put_io_channel(ch); 4906 4907 poll_threads(); 4908 4909 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 4910 CU_ASSERT(rc == 0); 4911 4912 poll_threads(); 4913 spdk_delay_us(1000); 4914 poll_threads(); 4915 4916 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4917 4918 g_opts.bdev_retry_count = 0; 4919 } 4920 4921 static void 4922 test_concurrent_read_ana_log_page(void) 4923 { 4924 struct spdk_nvme_transport_id trid = {}; 4925 struct spdk_nvme_ctrlr *ctrlr; 4926 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 4927 struct nvme_ctrlr *nvme_ctrlr; 4928 const int STRING_SIZE = 32; 4929 const char *attached_names[STRING_SIZE]; 4930 int rc; 4931 4932 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4933 ut_init_trid(&trid); 4934 4935 set_thread(0); 4936 4937 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4938 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4939 4940 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4941 4942 g_ut_attach_ctrlr_status = 0; 4943 g_ut_attach_bdev_count = 1; 4944 4945 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4946 attach_ctrlr_done, NULL, &opts, NULL, false); 4947 CU_ASSERT(rc == 0); 4948 4949 spdk_delay_us(1000); 4950 poll_threads(); 4951 4952 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4953 poll_threads(); 4954 4955 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4956 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4957 4958 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4959 4960 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4961 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4962 4963 /* Following read request should be rejected. */ 4964 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4965 4966 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4967 4968 set_thread(1); 4969 4970 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4971 4972 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4973 4974 /* Reset request while reading ANA log page should not be rejected. */ 4975 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4976 CU_ASSERT(rc == 0); 4977 4978 poll_threads(); 4979 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4980 poll_threads(); 4981 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4982 poll_threads(); 4983 4984 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4985 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4986 4987 /* Read ANA log page while resetting ctrlr should be rejected. */ 4988 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 4989 CU_ASSERT(rc == 0); 4990 4991 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4992 4993 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4994 4995 poll_threads(); 4996 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4997 poll_threads(); 4998 4999 set_thread(0); 5000 5001 /* It is possible that target sent ANA change for inactive namespaces. 5002 * 5003 * Previously, assert() was added because this case was unlikely. 5004 * However, assert() was hit in real environment. 5005 5006 * Hence, remove assert() and add unit test case. 5007 * 5008 * Simulate this case by depopulating namespaces and then parsing ANA 5009 * log page created when all namespaces are active. 5010 * Then, check if parsing ANA log page completes successfully. 5011 */ 5012 nvme_ctrlr_depopulate_namespaces(nvme_ctrlr); 5013 5014 rc = bdev_nvme_parse_ana_log_page(nvme_ctrlr, nvme_ctrlr_set_ana_states, nvme_ctrlr); 5015 CU_ASSERT(rc == 0); 5016 5017 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5018 CU_ASSERT(rc == 0); 5019 5020 poll_threads(); 5021 spdk_delay_us(1000); 5022 poll_threads(); 5023 5024 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5025 } 5026 5027 static void 5028 test_retry_io_for_ana_error(void) 5029 { 5030 struct nvme_path_id path = {}; 5031 struct spdk_nvme_ctrlr *ctrlr; 5032 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 5033 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5034 struct nvme_ctrlr *nvme_ctrlr; 5035 const int STRING_SIZE = 32; 5036 const char *attached_names[STRING_SIZE]; 5037 struct nvme_bdev *bdev; 5038 struct nvme_ns *nvme_ns; 5039 struct spdk_bdev_io *bdev_io; 5040 struct nvme_bdev_io *bio; 5041 struct spdk_io_channel *ch; 5042 struct nvme_bdev_channel *nbdev_ch; 5043 struct nvme_io_path *io_path; 5044 struct nvme_qpair *nvme_qpair; 5045 struct ut_nvme_req *req; 5046 uint64_t now; 5047 int rc; 5048 5049 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5050 ut_init_trid(&path.trid); 5051 5052 g_opts.bdev_retry_count = 1; 5053 5054 set_thread(0); 5055 5056 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 5057 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5058 5059 g_ut_attach_ctrlr_status = 0; 5060 g_ut_attach_bdev_count = 1; 5061 5062 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5063 attach_ctrlr_done, NULL, &opts, NULL, false); 5064 CU_ASSERT(rc == 0); 5065 5066 spdk_delay_us(1000); 5067 poll_threads(); 5068 5069 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5070 poll_threads(); 5071 5072 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5073 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5074 5075 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, opts.hostnqn); 5076 CU_ASSERT(nvme_ctrlr != NULL); 5077 5078 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5079 CU_ASSERT(bdev != NULL); 5080 5081 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5082 CU_ASSERT(nvme_ns != NULL); 5083 5084 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5085 ut_bdev_io_set_buf(bdev_io); 5086 5087 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 5088 5089 ch = spdk_get_io_channel(bdev); 5090 SPDK_CU_ASSERT_FATAL(ch != NULL); 5091 5092 nbdev_ch = spdk_io_channel_get_ctx(ch); 5093 5094 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5095 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5096 5097 nvme_qpair = io_path->qpair; 5098 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5099 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5100 5101 now = spdk_get_ticks(); 5102 5103 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 5104 5105 /* If I/O got ANA error, it should be queued, the corresponding namespace 5106 * should be freezed and its ANA state should be updated. 5107 */ 5108 bdev_io->internal.f.in_submit_request = true; 5109 5110 bdev_nvme_submit_request(ch, bdev_io); 5111 5112 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5113 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 5114 5115 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 5116 SPDK_CU_ASSERT_FATAL(req != NULL); 5117 5118 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5119 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 5120 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 5121 5122 poll_thread_times(0, 1); 5123 5124 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5125 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 5126 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5127 /* I/O should be retried immediately. */ 5128 CU_ASSERT(bio->retry_ticks == now); 5129 CU_ASSERT(nvme_ns->ana_state_updating == true); 5130 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 5131 5132 poll_threads(); 5133 5134 /* Namespace is inaccessible, and hence I/O should be queued again. */ 5135 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5136 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 5137 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5138 /* I/O should be retried after a second if no I/O path was found but 5139 * any I/O path may become available. 5140 */ 5141 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 5142 5143 /* Namespace should be unfreezed after completing to update its ANA state. */ 5144 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5145 poll_threads(); 5146 5147 CU_ASSERT(nvme_ns->ana_state_updating == false); 5148 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5149 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 5150 5151 /* Retry the queued I/O should succeed. */ 5152 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 5153 poll_threads(); 5154 5155 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5156 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 5157 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5158 5159 free(bdev_io); 5160 5161 spdk_put_io_channel(ch); 5162 5163 poll_threads(); 5164 5165 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5166 CU_ASSERT(rc == 0); 5167 5168 poll_threads(); 5169 spdk_delay_us(1000); 5170 poll_threads(); 5171 5172 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5173 5174 g_opts.bdev_retry_count = 0; 5175 } 5176 5177 static void 5178 test_check_io_error_resiliency_params(void) 5179 { 5180 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5181 * 3rd parameter is fast_io_fail_timeout_sec. 5182 */ 5183 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5184 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5185 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5186 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5187 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5188 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5189 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5190 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5191 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5192 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5193 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5194 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5195 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5196 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5197 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5198 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5199 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5200 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5201 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5202 } 5203 5204 static void 5205 test_retry_io_if_ctrlr_is_resetting(void) 5206 { 5207 struct nvme_path_id path = {}; 5208 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5209 struct spdk_nvme_ctrlr *ctrlr; 5210 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5211 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5212 struct nvme_ctrlr *nvme_ctrlr; 5213 const int STRING_SIZE = 32; 5214 const char *attached_names[STRING_SIZE]; 5215 struct nvme_bdev *bdev; 5216 struct nvme_ns *nvme_ns; 5217 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5218 struct spdk_io_channel *ch; 5219 struct nvme_bdev_channel *nbdev_ch; 5220 struct nvme_io_path *io_path; 5221 struct nvme_qpair *nvme_qpair; 5222 int rc; 5223 5224 g_opts.bdev_retry_count = 1; 5225 5226 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5227 ut_init_trid(&path.trid); 5228 5229 set_thread(0); 5230 5231 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5232 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5233 5234 g_ut_attach_ctrlr_status = 0; 5235 g_ut_attach_bdev_count = 1; 5236 5237 opts.ctrlr_loss_timeout_sec = -1; 5238 opts.reconnect_delay_sec = 1; 5239 5240 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5241 attach_ctrlr_done, NULL, &dopts, &opts, false); 5242 CU_ASSERT(rc == 0); 5243 5244 spdk_delay_us(1000); 5245 poll_threads(); 5246 5247 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5248 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5249 5250 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5251 CU_ASSERT(nvme_ctrlr != NULL); 5252 5253 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5254 CU_ASSERT(bdev != NULL); 5255 5256 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5257 CU_ASSERT(nvme_ns != NULL); 5258 5259 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5260 ut_bdev_io_set_buf(bdev_io1); 5261 5262 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5263 ut_bdev_io_set_buf(bdev_io2); 5264 5265 ch = spdk_get_io_channel(bdev); 5266 SPDK_CU_ASSERT_FATAL(ch != NULL); 5267 5268 nbdev_ch = spdk_io_channel_get_ctx(ch); 5269 5270 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5271 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5272 5273 nvme_qpair = io_path->qpair; 5274 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5275 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5276 5277 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5278 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5279 5280 /* If qpair is connected, I/O should succeed. */ 5281 bdev_io1->internal.f.in_submit_request = true; 5282 5283 bdev_nvme_submit_request(ch, bdev_io1); 5284 CU_ASSERT(bdev_io1->internal.f.in_submit_request == true); 5285 5286 poll_threads(); 5287 CU_ASSERT(bdev_io1->internal.f.in_submit_request == false); 5288 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5289 5290 /* If qpair is disconnected, it is freed and then reconnected via resetting 5291 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5292 * while resetting the nvme_ctrlr. 5293 */ 5294 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5295 ctrlr->is_failed = true; 5296 5297 poll_thread_times(0, 5); 5298 5299 CU_ASSERT(nvme_qpair->qpair == NULL); 5300 CU_ASSERT(nvme_ctrlr->resetting == true); 5301 CU_ASSERT(ctrlr->is_failed == false); 5302 5303 bdev_io1->internal.f.in_submit_request = true; 5304 5305 bdev_nvme_submit_request(ch, bdev_io1); 5306 5307 spdk_delay_us(1); 5308 5309 bdev_io2->internal.f.in_submit_request = true; 5310 5311 bdev_nvme_submit_request(ch, bdev_io2); 5312 5313 CU_ASSERT(bdev_io1->internal.f.in_submit_request == true); 5314 CU_ASSERT(bdev_io2->internal.f.in_submit_request == true); 5315 CU_ASSERT(bdev_io1 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5316 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx( 5317 TAILQ_NEXT((struct nvme_bdev_io *)bdev_io1->driver_ctx, 5318 retry_link))); 5319 5320 poll_threads(); 5321 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5322 poll_threads(); 5323 5324 CU_ASSERT(nvme_qpair->qpair != NULL); 5325 CU_ASSERT(nvme_ctrlr->resetting == false); 5326 5327 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5328 5329 poll_thread_times(0, 1); 5330 5331 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5332 CU_ASSERT(bdev_io1->internal.f.in_submit_request == true); 5333 CU_ASSERT(bdev_io2->internal.f.in_submit_request == true); 5334 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5335 5336 poll_threads(); 5337 5338 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5339 CU_ASSERT(bdev_io1->internal.f.in_submit_request == false); 5340 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5341 CU_ASSERT(bdev_io2->internal.f.in_submit_request == true); 5342 CU_ASSERT(bdev_io2 == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5343 5344 spdk_delay_us(1); 5345 5346 poll_thread_times(0, 1); 5347 5348 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5349 CU_ASSERT(bdev_io2->internal.f.in_submit_request == true); 5350 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5351 5352 poll_threads(); 5353 5354 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5355 CU_ASSERT(bdev_io2->internal.f.in_submit_request == false); 5356 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5357 5358 free(bdev_io1); 5359 free(bdev_io2); 5360 5361 spdk_put_io_channel(ch); 5362 5363 poll_threads(); 5364 5365 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5366 CU_ASSERT(rc == 0); 5367 5368 poll_threads(); 5369 spdk_delay_us(1000); 5370 poll_threads(); 5371 5372 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5373 5374 g_opts.bdev_retry_count = 0; 5375 } 5376 5377 static void 5378 test_reconnect_ctrlr(void) 5379 { 5380 struct spdk_nvme_transport_id trid = {}; 5381 struct spdk_nvme_ctrlr ctrlr = {}; 5382 struct nvme_ctrlr *nvme_ctrlr; 5383 struct spdk_io_channel *ch1, *ch2; 5384 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5385 int rc; 5386 5387 ut_init_trid(&trid); 5388 TAILQ_INIT(&ctrlr.active_io_qpairs); 5389 5390 set_thread(0); 5391 5392 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5393 CU_ASSERT(rc == 0); 5394 5395 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5396 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5397 5398 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5399 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5400 5401 ch1 = spdk_get_io_channel(nvme_ctrlr); 5402 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5403 5404 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5405 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5406 5407 set_thread(1); 5408 5409 ch2 = spdk_get_io_channel(nvme_ctrlr); 5410 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5411 5412 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5413 5414 /* Reset starts from thread 1. */ 5415 set_thread(1); 5416 5417 /* The reset should fail and a reconnect timer should be registered. */ 5418 ctrlr.fail_reset = true; 5419 ctrlr.is_failed = true; 5420 5421 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5422 CU_ASSERT(rc == 0); 5423 CU_ASSERT(nvme_ctrlr->resetting == true); 5424 CU_ASSERT(ctrlr.is_failed == true); 5425 5426 poll_threads(); 5427 5428 CU_ASSERT(nvme_ctrlr->resetting == false); 5429 CU_ASSERT(ctrlr.is_failed == false); 5430 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5431 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5432 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5433 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5434 5435 /* A new reset starts from thread 0. */ 5436 set_thread(1); 5437 5438 /* The reset should cancel the reconnect timer and should start from reconnection. 5439 * Then, the reset should fail and a reconnect timer should be registered again. 5440 */ 5441 ctrlr.fail_reset = true; 5442 ctrlr.is_failed = true; 5443 5444 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5445 CU_ASSERT(rc == 0); 5446 CU_ASSERT(nvme_ctrlr->resetting == true); 5447 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5448 CU_ASSERT(ctrlr.is_failed == true); 5449 5450 poll_threads(); 5451 5452 CU_ASSERT(nvme_ctrlr->resetting == false); 5453 CU_ASSERT(ctrlr.is_failed == false); 5454 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5455 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5456 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5457 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5458 5459 /* Then a reconnect retry should suceeed. */ 5460 ctrlr.fail_reset = false; 5461 5462 spdk_delay_us(SPDK_SEC_TO_USEC); 5463 poll_thread_times(0, 1); 5464 5465 CU_ASSERT(nvme_ctrlr->resetting == true); 5466 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5467 5468 poll_threads(); 5469 5470 CU_ASSERT(nvme_ctrlr->resetting == false); 5471 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5472 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5473 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5474 5475 /* The reset should fail and a reconnect timer should be registered. */ 5476 ctrlr.fail_reset = true; 5477 ctrlr.is_failed = true; 5478 5479 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5480 CU_ASSERT(rc == 0); 5481 CU_ASSERT(nvme_ctrlr->resetting == true); 5482 CU_ASSERT(ctrlr.is_failed == true); 5483 5484 poll_threads(); 5485 5486 CU_ASSERT(nvme_ctrlr->resetting == false); 5487 CU_ASSERT(ctrlr.is_failed == false); 5488 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5489 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5490 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5491 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5492 5493 /* Then a reconnect retry should still fail. */ 5494 spdk_delay_us(SPDK_SEC_TO_USEC); 5495 poll_thread_times(0, 1); 5496 5497 CU_ASSERT(nvme_ctrlr->resetting == true); 5498 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5499 5500 poll_threads(); 5501 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5502 poll_threads(); 5503 5504 CU_ASSERT(nvme_ctrlr->resetting == false); 5505 CU_ASSERT(ctrlr.is_failed == false); 5506 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5507 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5508 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5509 5510 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5511 spdk_delay_us(SPDK_SEC_TO_USEC); 5512 poll_threads(); 5513 5514 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5515 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5516 CU_ASSERT(nvme_ctrlr->destruct == true); 5517 5518 spdk_put_io_channel(ch2); 5519 5520 set_thread(0); 5521 5522 spdk_put_io_channel(ch1); 5523 5524 poll_threads(); 5525 spdk_delay_us(1000); 5526 poll_threads(); 5527 5528 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5529 } 5530 5531 static struct nvme_path_id * 5532 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5533 const struct spdk_nvme_transport_id *trid) 5534 { 5535 struct nvme_path_id *p; 5536 5537 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5538 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5539 break; 5540 } 5541 } 5542 5543 return p; 5544 } 5545 5546 static void 5547 test_retry_failover_ctrlr(void) 5548 { 5549 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5550 struct spdk_nvme_ctrlr ctrlr = {}; 5551 struct nvme_ctrlr *nvme_ctrlr = NULL; 5552 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5553 struct spdk_io_channel *ch; 5554 struct nvme_ctrlr_channel *ctrlr_ch; 5555 int rc; 5556 5557 ut_init_trid(&trid1); 5558 ut_init_trid2(&trid2); 5559 ut_init_trid3(&trid3); 5560 TAILQ_INIT(&ctrlr.active_io_qpairs); 5561 5562 set_thread(0); 5563 5564 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5565 CU_ASSERT(rc == 0); 5566 5567 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5568 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5569 5570 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5571 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5572 5573 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5574 CU_ASSERT(rc == 0); 5575 5576 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5577 CU_ASSERT(rc == 0); 5578 5579 ch = spdk_get_io_channel(nvme_ctrlr); 5580 SPDK_CU_ASSERT_FATAL(ch != NULL); 5581 5582 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5583 5584 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5585 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5586 CU_ASSERT(path_id1->last_failed_tsc == 0); 5587 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5588 5589 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5590 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5591 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5592 5593 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5594 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5595 5596 /* It is expected that connecting both of trid1, trid2, and trid3 fail, 5597 * and a reconnect timer is started. */ 5598 ctrlr.fail_reset = true; 5599 ctrlr.is_failed = true; 5600 5601 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5602 CU_ASSERT(rc == 0); 5603 5604 poll_threads(); 5605 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5606 poll_threads(); 5607 5608 CU_ASSERT(nvme_ctrlr->resetting == false); 5609 CU_ASSERT(ctrlr.is_failed == false); 5610 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5611 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5612 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5613 CU_ASSERT(path_id1->last_failed_tsc != 0); 5614 5615 CU_ASSERT(path_id2->last_failed_tsc != 0); 5616 CU_ASSERT(path_id3->last_failed_tsc != 0); 5617 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5618 5619 /* If we remove trid1 while reconnect is scheduled, trid1 is removed and path_id is 5620 * switched to trid2 but reset is not started. 5621 */ 5622 rc = bdev_nvme_failover_ctrlr_unsafe(nvme_ctrlr, true); 5623 CU_ASSERT(rc == -EALREADY); 5624 5625 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid1) == NULL); 5626 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5627 5628 CU_ASSERT(nvme_ctrlr->resetting == false); 5629 5630 /* If reconnect succeeds, trid2 should be the active path_id */ 5631 ctrlr.fail_reset = false; 5632 5633 spdk_delay_us(SPDK_SEC_TO_USEC); 5634 poll_thread_times(0, 1); 5635 5636 CU_ASSERT(nvme_ctrlr->resetting == true); 5637 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5638 5639 poll_threads(); 5640 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5641 poll_threads(); 5642 5643 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) != NULL); 5644 CU_ASSERT(path_id2->last_failed_tsc == 0); 5645 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5646 CU_ASSERT(nvme_ctrlr->resetting == false); 5647 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5648 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5649 5650 spdk_put_io_channel(ch); 5651 5652 poll_threads(); 5653 5654 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 5655 CU_ASSERT(rc == 0); 5656 5657 poll_threads(); 5658 spdk_delay_us(1000); 5659 poll_threads(); 5660 5661 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5662 } 5663 5664 static void 5665 test_fail_path(void) 5666 { 5667 struct nvme_path_id path = {}; 5668 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 5669 struct spdk_nvme_ctrlr *ctrlr; 5670 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 5671 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5672 struct nvme_ctrlr *nvme_ctrlr; 5673 const int STRING_SIZE = 32; 5674 const char *attached_names[STRING_SIZE]; 5675 struct nvme_bdev *bdev; 5676 struct nvme_ns *nvme_ns; 5677 struct spdk_bdev_io *bdev_io; 5678 struct spdk_io_channel *ch; 5679 struct nvme_bdev_channel *nbdev_ch; 5680 struct nvme_io_path *io_path; 5681 struct nvme_ctrlr_channel *ctrlr_ch; 5682 int rc; 5683 5684 /* The test scenario is the following. 5685 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5686 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5687 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5688 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5689 * comes first. The queued I/O is failed. 5690 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5691 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5692 */ 5693 5694 g_opts.bdev_retry_count = 1; 5695 5696 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5697 ut_init_trid(&path.trid); 5698 5699 set_thread(0); 5700 5701 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5702 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5703 5704 g_ut_attach_ctrlr_status = 0; 5705 g_ut_attach_bdev_count = 1; 5706 5707 opts.ctrlr_loss_timeout_sec = 4; 5708 opts.reconnect_delay_sec = 1; 5709 opts.fast_io_fail_timeout_sec = 2; 5710 5711 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5712 attach_ctrlr_done, NULL, &dopts, &opts, false); 5713 CU_ASSERT(rc == 0); 5714 5715 spdk_delay_us(1000); 5716 poll_threads(); 5717 5718 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5719 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5720 5721 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 5722 CU_ASSERT(nvme_ctrlr != NULL); 5723 5724 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5725 CU_ASSERT(bdev != NULL); 5726 5727 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5728 CU_ASSERT(nvme_ns != NULL); 5729 5730 ch = spdk_get_io_channel(bdev); 5731 SPDK_CU_ASSERT_FATAL(ch != NULL); 5732 5733 nbdev_ch = spdk_io_channel_get_ctx(ch); 5734 5735 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5736 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5737 5738 ctrlr_ch = io_path->qpair->ctrlr_ch; 5739 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5740 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5741 5742 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5743 ut_bdev_io_set_buf(bdev_io); 5744 5745 5746 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5747 ctrlr->fail_reset = true; 5748 ctrlr->is_failed = true; 5749 5750 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 5751 CU_ASSERT(rc == 0); 5752 CU_ASSERT(nvme_ctrlr->resetting == true); 5753 CU_ASSERT(ctrlr->is_failed == true); 5754 5755 poll_threads(); 5756 5757 CU_ASSERT(nvme_ctrlr->resetting == false); 5758 CU_ASSERT(ctrlr->is_failed == false); 5759 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5760 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5761 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5762 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5763 5764 /* I/O should be queued. */ 5765 bdev_io->internal.f.in_submit_request = true; 5766 5767 bdev_nvme_submit_request(ch, bdev_io); 5768 5769 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 5770 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5771 5772 /* After a second, the I/O should be still queued and the ctrlr should be 5773 * still recovering. 5774 */ 5775 spdk_delay_us(SPDK_SEC_TO_USEC); 5776 poll_threads(); 5777 5778 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 5779 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 5780 5781 CU_ASSERT(nvme_ctrlr->resetting == false); 5782 CU_ASSERT(ctrlr->is_failed == false); 5783 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5784 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5785 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5786 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5787 5788 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5789 5790 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5791 spdk_delay_us(SPDK_SEC_TO_USEC); 5792 poll_threads(); 5793 5794 CU_ASSERT(nvme_ctrlr->resetting == false); 5795 CU_ASSERT(ctrlr->is_failed == false); 5796 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5797 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5798 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5799 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5800 5801 /* Then within a second, pending I/O should be failed. */ 5802 spdk_delay_us(SPDK_SEC_TO_USEC); 5803 poll_threads(); 5804 5805 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5806 poll_threads(); 5807 5808 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 5809 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5810 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5811 5812 /* Another I/O submission should be failed immediately. */ 5813 bdev_io->internal.f.in_submit_request = true; 5814 5815 bdev_nvme_submit_request(ch, bdev_io); 5816 5817 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 5818 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5819 5820 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5821 * be deleted. 5822 */ 5823 spdk_delay_us(SPDK_SEC_TO_USEC); 5824 poll_threads(); 5825 5826 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5827 poll_threads(); 5828 5829 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5830 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5831 CU_ASSERT(nvme_ctrlr->destruct == true); 5832 5833 spdk_put_io_channel(ch); 5834 5835 poll_threads(); 5836 spdk_delay_us(1000); 5837 poll_threads(); 5838 5839 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5840 5841 free(bdev_io); 5842 5843 g_opts.bdev_retry_count = 0; 5844 } 5845 5846 static void 5847 test_nvme_ns_cmp(void) 5848 { 5849 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5850 5851 nvme_ns1.id = 0; 5852 nvme_ns2.id = UINT32_MAX; 5853 5854 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5855 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5856 } 5857 5858 static void 5859 test_ana_transition(void) 5860 { 5861 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5862 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5863 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5864 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5865 5866 /* case 1: ANA transition timedout is canceled. */ 5867 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5868 nvme_ns.ana_transition_timedout = true; 5869 5870 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5871 5872 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5873 5874 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5875 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5876 5877 /* case 2: ANATT timer is kept. */ 5878 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5879 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5880 &nvme_ns, 5881 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5882 5883 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5884 5885 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5886 5887 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5888 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5889 5890 /* case 3: ANATT timer is stopped. */ 5891 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5892 5893 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5894 5895 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5896 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5897 5898 /* ANATT timer is started. */ 5899 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5900 5901 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5902 5903 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5904 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5905 5906 /* ANATT timer is expired. */ 5907 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5908 5909 poll_threads(); 5910 5911 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5912 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5913 } 5914 5915 static void 5916 _set_preferred_path_cb(void *cb_arg, int rc) 5917 { 5918 bool *done = cb_arg; 5919 5920 *done = true; 5921 } 5922 5923 static void 5924 test_set_preferred_path(void) 5925 { 5926 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5927 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5928 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 5929 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5930 const int STRING_SIZE = 32; 5931 const char *attached_names[STRING_SIZE]; 5932 struct nvme_bdev *bdev; 5933 struct spdk_io_channel *ch; 5934 struct nvme_bdev_channel *nbdev_ch; 5935 struct nvme_io_path *io_path; 5936 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5937 const struct spdk_nvme_ctrlr_data *cdata; 5938 bool done; 5939 int rc; 5940 5941 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5942 ut_init_trid(&path1.trid); 5943 ut_init_trid2(&path2.trid); 5944 ut_init_trid3(&path3.trid); 5945 g_ut_attach_ctrlr_status = 0; 5946 g_ut_attach_bdev_count = 1; 5947 5948 set_thread(0); 5949 5950 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5951 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5952 5953 ctrlr1->ns[0].uuid = &uuid1; 5954 5955 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5956 attach_ctrlr_done, NULL, &opts, NULL, true); 5957 CU_ASSERT(rc == 0); 5958 5959 spdk_delay_us(1000); 5960 poll_threads(); 5961 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5962 poll_threads(); 5963 5964 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5965 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5966 5967 ctrlr2->ns[0].uuid = &uuid1; 5968 5969 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5970 attach_ctrlr_done, NULL, &opts, NULL, true); 5971 CU_ASSERT(rc == 0); 5972 5973 spdk_delay_us(1000); 5974 poll_threads(); 5975 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5976 poll_threads(); 5977 5978 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5979 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5980 5981 ctrlr3->ns[0].uuid = &uuid1; 5982 5983 rc = spdk_bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5984 attach_ctrlr_done, NULL, &opts, NULL, true); 5985 CU_ASSERT(rc == 0); 5986 5987 spdk_delay_us(1000); 5988 poll_threads(); 5989 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5990 poll_threads(); 5991 5992 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5993 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5994 5995 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5996 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5997 5998 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5999 6000 ch = spdk_get_io_channel(bdev); 6001 SPDK_CU_ASSERT_FATAL(ch != NULL); 6002 nbdev_ch = spdk_io_channel_get_ctx(ch); 6003 6004 io_path = bdev_nvme_find_io_path(nbdev_ch); 6005 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6006 6007 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6008 6009 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 6010 * should return io_path to ctrlr2. 6011 */ 6012 6013 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 6014 done = false; 6015 6016 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6017 6018 poll_threads(); 6019 CU_ASSERT(done == true); 6020 6021 io_path = bdev_nvme_find_io_path(nbdev_ch); 6022 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6023 6024 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6025 6026 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 6027 * acquired, find_io_path() should return io_path to ctrlr3. 6028 */ 6029 6030 spdk_put_io_channel(ch); 6031 6032 poll_threads(); 6033 6034 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 6035 done = false; 6036 6037 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6038 6039 poll_threads(); 6040 CU_ASSERT(done == true); 6041 6042 ch = spdk_get_io_channel(bdev); 6043 SPDK_CU_ASSERT_FATAL(ch != NULL); 6044 nbdev_ch = spdk_io_channel_get_ctx(ch); 6045 6046 io_path = bdev_nvme_find_io_path(nbdev_ch); 6047 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6048 6049 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 6050 6051 spdk_put_io_channel(ch); 6052 6053 poll_threads(); 6054 6055 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6056 CU_ASSERT(rc == 0); 6057 6058 poll_threads(); 6059 spdk_delay_us(1000); 6060 poll_threads(); 6061 6062 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6063 } 6064 6065 static void 6066 test_find_next_io_path(void) 6067 { 6068 struct nvme_bdev_channel nbdev_ch = { 6069 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6070 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6071 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 6072 }; 6073 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6074 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6075 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6076 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6077 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6078 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6079 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6080 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6081 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6082 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6083 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6084 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6085 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6086 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6087 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6088 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6089 6090 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6091 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6092 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6093 6094 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 6095 * is covered in test_find_io_path. 6096 */ 6097 6098 nbdev_ch.current_io_path = &io_path2; 6099 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6100 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6101 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6102 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6103 6104 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6105 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6106 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6107 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6108 6109 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6110 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6111 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6112 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6113 6114 nbdev_ch.current_io_path = &io_path3; 6115 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6116 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6117 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6118 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6119 6120 /* Test if next io_path is selected according to rr_min_io */ 6121 6122 nbdev_ch.current_io_path = NULL; 6123 nbdev_ch.rr_min_io = 2; 6124 nbdev_ch.rr_counter = 0; 6125 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6126 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6127 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6128 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6129 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6130 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6131 6132 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6133 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6134 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6135 } 6136 6137 static void 6138 test_find_io_path_min_qd(void) 6139 { 6140 struct nvme_bdev_channel nbdev_ch = { 6141 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 6142 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6143 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 6144 }; 6145 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 6146 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 6147 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 6148 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 6149 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 6150 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 6151 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 6152 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 6153 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 6154 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 6155 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 6156 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 6157 struct nvme_ns nvme_ns1 = { .ns = &ns1, }, nvme_ns2 = { .ns = &ns2, }, nvme_ns3 = { .ns = &ns3, }; 6158 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 6159 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 6160 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 6161 6162 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 6163 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 6164 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 6165 6166 /* Test if the minimum io_outstanding or the ANA optimized state is 6167 * prioritized when using least queue depth selector 6168 */ 6169 qpair1.num_outstanding_reqs = 2; 6170 qpair2.num_outstanding_reqs = 1; 6171 qpair3.num_outstanding_reqs = 0; 6172 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6173 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6174 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6175 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6176 6177 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6178 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 6179 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6180 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6181 6182 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6183 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 6184 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 6185 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 6186 6187 qpair2.num_outstanding_reqs = 4; 6188 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 6189 } 6190 6191 static void 6192 test_disable_auto_failback(void) 6193 { 6194 struct nvme_path_id path1 = {}, path2 = {}; 6195 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 6196 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6197 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6198 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6199 struct nvme_ctrlr *nvme_ctrlr1; 6200 const int STRING_SIZE = 32; 6201 const char *attached_names[STRING_SIZE]; 6202 struct nvme_bdev *bdev; 6203 struct spdk_io_channel *ch; 6204 struct nvme_bdev_channel *nbdev_ch; 6205 struct nvme_io_path *io_path; 6206 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6207 const struct spdk_nvme_ctrlr_data *cdata; 6208 bool done; 6209 int rc; 6210 6211 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6212 ut_init_trid(&path1.trid); 6213 ut_init_trid2(&path2.trid); 6214 g_ut_attach_ctrlr_status = 0; 6215 g_ut_attach_bdev_count = 1; 6216 6217 g_opts.disable_auto_failback = true; 6218 6219 opts.ctrlr_loss_timeout_sec = -1; 6220 opts.reconnect_delay_sec = 1; 6221 6222 set_thread(0); 6223 6224 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6225 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6226 6227 ctrlr1->ns[0].uuid = &uuid1; 6228 6229 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6230 attach_ctrlr_done, NULL, &dopts, &opts, true); 6231 CU_ASSERT(rc == 0); 6232 6233 spdk_delay_us(1000); 6234 poll_threads(); 6235 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6236 poll_threads(); 6237 6238 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6239 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6240 6241 ctrlr2->ns[0].uuid = &uuid1; 6242 6243 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6244 attach_ctrlr_done, NULL, &dopts, &opts, true); 6245 CU_ASSERT(rc == 0); 6246 6247 spdk_delay_us(1000); 6248 poll_threads(); 6249 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6250 poll_threads(); 6251 6252 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6253 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6254 6255 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6256 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6257 6258 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, dopts.hostnqn); 6259 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6260 6261 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6262 6263 ch = spdk_get_io_channel(bdev); 6264 SPDK_CU_ASSERT_FATAL(ch != NULL); 6265 nbdev_ch = spdk_io_channel_get_ctx(ch); 6266 6267 io_path = bdev_nvme_find_io_path(nbdev_ch); 6268 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6269 6270 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6271 6272 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6273 ctrlr1->fail_reset = true; 6274 ctrlr1->is_failed = true; 6275 6276 bdev_nvme_reset_ctrlr(nvme_ctrlr1); 6277 6278 poll_threads(); 6279 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6280 poll_threads(); 6281 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6282 poll_threads(); 6283 6284 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6285 6286 io_path = bdev_nvme_find_io_path(nbdev_ch); 6287 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6288 6289 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6290 6291 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6292 * Hence, io_path to ctrlr2 should still be used. 6293 */ 6294 ctrlr1->fail_reset = false; 6295 6296 spdk_delay_us(SPDK_SEC_TO_USEC); 6297 poll_threads(); 6298 6299 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6300 6301 io_path = bdev_nvme_find_io_path(nbdev_ch); 6302 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6303 6304 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6305 6306 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6307 * be used again. 6308 */ 6309 6310 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6311 done = false; 6312 6313 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6314 6315 poll_threads(); 6316 CU_ASSERT(done == true); 6317 6318 io_path = bdev_nvme_find_io_path(nbdev_ch); 6319 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6320 6321 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6322 6323 spdk_put_io_channel(ch); 6324 6325 poll_threads(); 6326 6327 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6328 CU_ASSERT(rc == 0); 6329 6330 poll_threads(); 6331 spdk_delay_us(1000); 6332 poll_threads(); 6333 6334 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6335 6336 g_opts.disable_auto_failback = false; 6337 } 6338 6339 static void 6340 ut_set_multipath_policy_done(void *cb_arg, int rc) 6341 { 6342 int *done = cb_arg; 6343 6344 SPDK_CU_ASSERT_FATAL(done != NULL); 6345 *done = rc; 6346 } 6347 6348 static void 6349 test_set_multipath_policy(void) 6350 { 6351 struct nvme_path_id path1 = {}, path2 = {}; 6352 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 6353 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6354 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 6355 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6356 const int STRING_SIZE = 32; 6357 const char *attached_names[STRING_SIZE]; 6358 struct nvme_bdev *bdev; 6359 struct spdk_io_channel *ch; 6360 struct nvme_bdev_channel *nbdev_ch; 6361 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6362 int done; 6363 int rc; 6364 6365 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6366 ut_init_trid(&path1.trid); 6367 ut_init_trid2(&path2.trid); 6368 g_ut_attach_ctrlr_status = 0; 6369 g_ut_attach_bdev_count = 1; 6370 6371 g_opts.disable_auto_failback = true; 6372 6373 opts.ctrlr_loss_timeout_sec = -1; 6374 opts.reconnect_delay_sec = 1; 6375 6376 set_thread(0); 6377 6378 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6379 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6380 6381 ctrlr1->ns[0].uuid = &uuid1; 6382 6383 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6384 attach_ctrlr_done, NULL, &dopts, &opts, true); 6385 CU_ASSERT(rc == 0); 6386 6387 spdk_delay_us(1000); 6388 poll_threads(); 6389 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6390 poll_threads(); 6391 6392 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6393 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6394 6395 ctrlr2->ns[0].uuid = &uuid1; 6396 6397 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6398 attach_ctrlr_done, NULL, &dopts, &opts, true); 6399 CU_ASSERT(rc == 0); 6400 6401 spdk_delay_us(1000); 6402 poll_threads(); 6403 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6404 poll_threads(); 6405 6406 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6407 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6408 6409 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6410 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6411 6412 /* If multipath policy is updated before getting any I/O channel, 6413 * an new I/O channel should have the update. 6414 */ 6415 done = -1; 6416 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6417 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6418 ut_set_multipath_policy_done, &done); 6419 poll_threads(); 6420 CU_ASSERT(done == 0); 6421 6422 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6423 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6424 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6425 6426 ch = spdk_get_io_channel(bdev); 6427 SPDK_CU_ASSERT_FATAL(ch != NULL); 6428 nbdev_ch = spdk_io_channel_get_ctx(ch); 6429 6430 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6431 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6432 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6433 6434 /* If multipath policy is updated while a I/O channel is active, 6435 * the update should be applied to the I/O channel immediately. 6436 */ 6437 done = -1; 6438 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6439 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6440 ut_set_multipath_policy_done, &done); 6441 poll_threads(); 6442 CU_ASSERT(done == 0); 6443 6444 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6445 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6446 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6447 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6448 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6449 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6450 6451 spdk_put_io_channel(ch); 6452 6453 poll_threads(); 6454 6455 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6456 CU_ASSERT(rc == 0); 6457 6458 poll_threads(); 6459 spdk_delay_us(1000); 6460 poll_threads(); 6461 6462 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6463 } 6464 6465 static void 6466 test_uuid_generation(void) 6467 { 6468 uint32_t nsid1 = 1, nsid2 = 2; 6469 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6470 char sn3[21] = " "; 6471 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6472 struct spdk_uuid uuid1, uuid2; 6473 int rc; 6474 6475 /* Test case 1: 6476 * Serial numbers are the same, nsids are different. 6477 * Compare two generated UUID - they should be different. */ 6478 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6479 CU_ASSERT(rc == 0); 6480 rc = nvme_generate_uuid(sn1, nsid2, &uuid2); 6481 CU_ASSERT(rc == 0); 6482 6483 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6484 6485 /* Test case 2: 6486 * Serial numbers differ only by one character, nsids are the same. 6487 * Compare two generated UUID - they should be different. */ 6488 rc = nvme_generate_uuid(sn1, nsid1, &uuid1); 6489 CU_ASSERT(rc == 0); 6490 rc = nvme_generate_uuid(sn2, nsid1, &uuid2); 6491 CU_ASSERT(rc == 0); 6492 6493 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6494 6495 /* Test case 3: 6496 * Serial number comprises only of space characters. 6497 * Validate the generated UUID. */ 6498 rc = nvme_generate_uuid(sn3, nsid1, &uuid1); 6499 CU_ASSERT(rc == 0); 6500 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6501 6502 } 6503 6504 static void 6505 test_retry_io_to_same_path(void) 6506 { 6507 struct nvme_path_id path1 = {}, path2 = {}; 6508 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6509 struct spdk_nvme_ctrlr_opts opts = {.hostnqn = UT_HOSTNQN}; 6510 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6511 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6512 const int STRING_SIZE = 32; 6513 const char *attached_names[STRING_SIZE]; 6514 struct nvme_bdev *bdev; 6515 struct spdk_bdev_io *bdev_io; 6516 struct nvme_bdev_io *bio; 6517 struct spdk_io_channel *ch; 6518 struct nvme_bdev_channel *nbdev_ch; 6519 struct nvme_io_path *io_path1, *io_path2; 6520 struct ut_nvme_req *req; 6521 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6522 int done; 6523 int rc; 6524 6525 g_opts.nvme_ioq_poll_period_us = 1; 6526 6527 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6528 ut_init_trid(&path1.trid); 6529 ut_init_trid2(&path2.trid); 6530 g_ut_attach_ctrlr_status = 0; 6531 g_ut_attach_bdev_count = 1; 6532 6533 set_thread(0); 6534 6535 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6536 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6537 6538 ctrlr1->ns[0].uuid = &uuid1; 6539 6540 rc = spdk_bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6541 attach_ctrlr_done, NULL, &opts, NULL, true); 6542 CU_ASSERT(rc == 0); 6543 6544 spdk_delay_us(1000); 6545 poll_threads(); 6546 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6547 poll_threads(); 6548 6549 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6550 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6551 6552 ctrlr2->ns[0].uuid = &uuid1; 6553 6554 rc = spdk_bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6555 attach_ctrlr_done, NULL, &opts, NULL, true); 6556 CU_ASSERT(rc == 0); 6557 6558 spdk_delay_us(1000); 6559 poll_threads(); 6560 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6561 poll_threads(); 6562 6563 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6564 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6565 6566 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid, opts.hostnqn); 6567 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6568 6569 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn); 6570 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6571 6572 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6573 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6574 6575 done = -1; 6576 spdk_bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6577 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6578 poll_threads(); 6579 CU_ASSERT(done == 0); 6580 6581 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6582 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6583 CU_ASSERT(bdev->rr_min_io == 1); 6584 6585 ch = spdk_get_io_channel(bdev); 6586 SPDK_CU_ASSERT_FATAL(ch != NULL); 6587 nbdev_ch = spdk_io_channel_get_ctx(ch); 6588 6589 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6590 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6591 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6592 6593 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6594 ut_bdev_io_set_buf(bdev_io); 6595 6596 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6597 6598 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6599 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6600 6601 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6602 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6603 6604 /* The 1st I/O should be submitted to io_path1. */ 6605 bdev_io->internal.f.in_submit_request = true; 6606 6607 bdev_nvme_submit_request(ch, bdev_io); 6608 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 6609 CU_ASSERT(bio->io_path == io_path1); 6610 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6611 6612 spdk_delay_us(1); 6613 6614 poll_threads(); 6615 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 6616 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6617 6618 /* The 2nd I/O should be submitted to io_path2 because the path selection 6619 * policy is round-robin. 6620 */ 6621 bdev_io->internal.f.in_submit_request = true; 6622 6623 bdev_nvme_submit_request(ch, bdev_io); 6624 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 6625 CU_ASSERT(bio->io_path == io_path2); 6626 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6627 6628 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6629 SPDK_CU_ASSERT_FATAL(req != NULL); 6630 6631 /* Set retry count to non-zero. */ 6632 g_opts.bdev_retry_count = 2; 6633 6634 /* Inject an I/O error. */ 6635 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6636 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6637 6638 /* The 2nd I/O should be queued to nbdev_ch. */ 6639 spdk_delay_us(1); 6640 poll_thread_times(0, 1); 6641 6642 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6643 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 6644 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6645 6646 /* The 2nd I/O should keep caching io_path2. */ 6647 CU_ASSERT(bio->io_path == io_path2); 6648 6649 /* The 2nd I/O should be submitted to io_path2 again. */ 6650 poll_thread_times(0, 1); 6651 6652 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 6653 CU_ASSERT(bio->io_path == io_path2); 6654 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6655 6656 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6657 SPDK_CU_ASSERT_FATAL(req != NULL); 6658 6659 /* Inject an I/O error again. */ 6660 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6661 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6662 req->cpl.status.crd = 1; 6663 6664 ctrlr2->cdata.crdt[1] = 1; 6665 6666 /* The 2nd I/O should be queued to nbdev_ch. */ 6667 spdk_delay_us(1); 6668 poll_thread_times(0, 1); 6669 6670 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6671 CU_ASSERT(bdev_io->internal.f.in_submit_request == true); 6672 CU_ASSERT(bdev_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch->retry_io_list))); 6673 6674 /* The 2nd I/O should keep caching io_path2. */ 6675 CU_ASSERT(bio->io_path == io_path2); 6676 6677 /* Detach ctrlr2 dynamically. */ 6678 rc = bdev_nvme_delete("nvme0", &path2, NULL, NULL); 6679 CU_ASSERT(rc == 0); 6680 6681 spdk_delay_us(1000); 6682 poll_threads(); 6683 spdk_delay_us(1000); 6684 poll_threads(); 6685 spdk_delay_us(1000); 6686 poll_threads(); 6687 spdk_delay_us(1000); 6688 poll_threads(); 6689 6690 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid, opts.hostnqn) == NULL); 6691 6692 poll_threads(); 6693 spdk_delay_us(100000); 6694 poll_threads(); 6695 spdk_delay_us(1); 6696 poll_threads(); 6697 6698 /* The 2nd I/O should succeed by io_path1. */ 6699 CU_ASSERT(bdev_io->internal.f.in_submit_request == false); 6700 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6701 CU_ASSERT(bio->io_path == io_path1); 6702 6703 free(bdev_io); 6704 6705 spdk_put_io_channel(ch); 6706 6707 poll_threads(); 6708 spdk_delay_us(1); 6709 poll_threads(); 6710 6711 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6712 CU_ASSERT(rc == 0); 6713 6714 poll_threads(); 6715 spdk_delay_us(1000); 6716 poll_threads(); 6717 6718 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6719 6720 g_opts.nvme_ioq_poll_period_us = 0; 6721 g_opts.bdev_retry_count = 0; 6722 } 6723 6724 /* This case is to verify a fix for a complex race condition that 6725 * failover is lost if fabric connect command gets timeout while 6726 * controller is being reset. 6727 */ 6728 static void 6729 test_race_between_reset_and_disconnected(void) 6730 { 6731 struct spdk_nvme_transport_id trid = {}; 6732 struct spdk_nvme_ctrlr ctrlr = {}; 6733 struct nvme_ctrlr *nvme_ctrlr = NULL; 6734 struct nvme_path_id *curr_trid; 6735 struct spdk_io_channel *ch1, *ch2; 6736 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6737 int rc; 6738 6739 ut_init_trid(&trid); 6740 TAILQ_INIT(&ctrlr.active_io_qpairs); 6741 6742 set_thread(0); 6743 6744 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6745 CU_ASSERT(rc == 0); 6746 6747 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6748 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6749 6750 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6751 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6752 6753 ch1 = spdk_get_io_channel(nvme_ctrlr); 6754 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6755 6756 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6757 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6758 6759 set_thread(1); 6760 6761 ch2 = spdk_get_io_channel(nvme_ctrlr); 6762 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6763 6764 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6765 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6766 6767 /* Reset starts from thread 1. */ 6768 set_thread(1); 6769 6770 nvme_ctrlr->resetting = false; 6771 curr_trid->last_failed_tsc = spdk_get_ticks(); 6772 ctrlr.is_failed = true; 6773 6774 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 6775 CU_ASSERT(rc == 0); 6776 CU_ASSERT(nvme_ctrlr->resetting == true); 6777 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6778 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6779 6780 poll_thread_times(0, 3); 6781 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6782 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6783 6784 poll_thread_times(0, 1); 6785 poll_thread_times(1, 1); 6786 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 6787 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6788 CU_ASSERT(ctrlr.is_failed == true); 6789 6790 poll_thread_times(1, 1); 6791 poll_thread_times(0, 1); 6792 CU_ASSERT(ctrlr.is_failed == false); 6793 CU_ASSERT(ctrlr.adminq.is_connected == false); 6794 6795 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6796 poll_thread_times(0, 2); 6797 CU_ASSERT(ctrlr.adminq.is_connected == true); 6798 6799 poll_thread_times(0, 1); 6800 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6801 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 6802 6803 poll_thread_times(1, 1); 6804 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6805 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6806 CU_ASSERT(nvme_ctrlr->resetting == true); 6807 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6808 6809 poll_thread_times(0, 2); 6810 CU_ASSERT(nvme_ctrlr->resetting == true); 6811 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6812 poll_thread_times(1, 1); 6813 CU_ASSERT(nvme_ctrlr->resetting == true); 6814 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6815 6816 /* Here is just one poll before _bdev_nvme_reset_complete() is executed. 6817 * 6818 * spdk_nvme_ctrlr_reconnect_poll_async() returns success before fabric 6819 * connect command is executed. If fabric connect command gets timeout, 6820 * bdev_nvme_failover_ctrlr() is executed. This should be deferred until 6821 * _bdev_nvme_reset_complete() sets ctrlr->resetting to false. 6822 * 6823 * Simulate fabric connect command timeout by calling bdev_nvme_failover_ctrlr(). 6824 */ 6825 rc = bdev_nvme_failover_ctrlr(nvme_ctrlr); 6826 CU_ASSERT(rc == -EINPROGRESS); 6827 CU_ASSERT(nvme_ctrlr->resetting == true); 6828 CU_ASSERT(nvme_ctrlr->pending_failover == true); 6829 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6830 6831 poll_thread_times(0, 1); 6832 6833 CU_ASSERT(nvme_ctrlr->resetting == true); 6834 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6835 CU_ASSERT(curr_trid->last_failed_tsc != 0); 6836 6837 poll_threads(); 6838 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6839 poll_threads(); 6840 6841 CU_ASSERT(nvme_ctrlr->resetting == false); 6842 CU_ASSERT(nvme_ctrlr->pending_failover == false); 6843 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6844 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 6845 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 6846 6847 spdk_put_io_channel(ch2); 6848 6849 set_thread(0); 6850 6851 spdk_put_io_channel(ch1); 6852 6853 poll_threads(); 6854 6855 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6856 CU_ASSERT(rc == 0); 6857 6858 poll_threads(); 6859 spdk_delay_us(1000); 6860 poll_threads(); 6861 6862 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6863 } 6864 static void 6865 ut_ctrlr_op_rpc_cb(void *cb_arg, int rc) 6866 { 6867 int *_rc = (int *)cb_arg; 6868 6869 SPDK_CU_ASSERT_FATAL(_rc != NULL); 6870 *_rc = rc; 6871 } 6872 6873 static void 6874 test_ctrlr_op_rpc(void) 6875 { 6876 struct spdk_nvme_transport_id trid = {}; 6877 struct spdk_nvme_ctrlr ctrlr = {}; 6878 struct nvme_ctrlr *nvme_ctrlr = NULL; 6879 struct nvme_path_id *curr_trid; 6880 struct spdk_io_channel *ch1, *ch2; 6881 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 6882 int ctrlr_op_rc; 6883 int rc; 6884 6885 ut_init_trid(&trid); 6886 TAILQ_INIT(&ctrlr.active_io_qpairs); 6887 6888 set_thread(0); 6889 6890 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 6891 CU_ASSERT(rc == 0); 6892 6893 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 6894 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 6895 6896 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 6897 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 6898 6899 ch1 = spdk_get_io_channel(nvme_ctrlr); 6900 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 6901 6902 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 6903 CU_ASSERT(ctrlr_ch1->qpair != NULL); 6904 6905 set_thread(1); 6906 6907 ch2 = spdk_get_io_channel(nvme_ctrlr); 6908 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 6909 6910 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 6911 CU_ASSERT(ctrlr_ch2->qpair != NULL); 6912 6913 /* Reset starts from thread 1. */ 6914 set_thread(1); 6915 6916 /* Case 1: ctrlr is already being destructed. */ 6917 nvme_ctrlr->destruct = true; 6918 ctrlr_op_rc = 0; 6919 6920 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6921 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6922 6923 poll_threads(); 6924 6925 CU_ASSERT(ctrlr_op_rc == -ENXIO); 6926 6927 /* Case 2: reset is in progress. */ 6928 nvme_ctrlr->destruct = false; 6929 nvme_ctrlr->resetting = true; 6930 ctrlr_op_rc = 0; 6931 6932 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6933 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6934 6935 poll_threads(); 6936 6937 CU_ASSERT(ctrlr_op_rc == -EBUSY); 6938 6939 /* Case 3: reset completes successfully. */ 6940 nvme_ctrlr->resetting = false; 6941 curr_trid->last_failed_tsc = spdk_get_ticks(); 6942 ctrlr.is_failed = true; 6943 ctrlr_op_rc = -1; 6944 6945 nvme_ctrlr_op_rpc(nvme_ctrlr, NVME_CTRLR_OP_RESET, 6946 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6947 6948 CU_ASSERT(nvme_ctrlr->resetting == true); 6949 CU_ASSERT(ctrlr_op_rc == -1); 6950 6951 poll_threads(); 6952 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6953 poll_threads(); 6954 6955 CU_ASSERT(nvme_ctrlr->resetting == false); 6956 CU_ASSERT(curr_trid->last_failed_tsc == 0); 6957 CU_ASSERT(ctrlr.is_failed == false); 6958 CU_ASSERT(ctrlr_op_rc == 0); 6959 6960 /* Case 4: invalid operation. */ 6961 nvme_ctrlr_op_rpc(nvme_ctrlr, -1, 6962 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 6963 6964 poll_threads(); 6965 6966 CU_ASSERT(ctrlr_op_rc == -EINVAL); 6967 6968 spdk_put_io_channel(ch2); 6969 6970 set_thread(0); 6971 6972 spdk_put_io_channel(ch1); 6973 6974 poll_threads(); 6975 6976 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 6977 CU_ASSERT(rc == 0); 6978 6979 poll_threads(); 6980 spdk_delay_us(1000); 6981 poll_threads(); 6982 6983 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6984 } 6985 6986 static void 6987 test_bdev_ctrlr_op_rpc(void) 6988 { 6989 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 6990 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 6991 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6992 struct nvme_ctrlr *nvme_ctrlr1 = NULL, *nvme_ctrlr2 = NULL; 6993 struct nvme_path_id *curr_trid1, *curr_trid2; 6994 struct spdk_io_channel *ch11, *ch12, *ch21, *ch22; 6995 struct nvme_ctrlr_channel *ctrlr_ch11, *ctrlr_ch12, *ctrlr_ch21, *ctrlr_ch22; 6996 int ctrlr_op_rc; 6997 int rc; 6998 6999 ut_init_trid(&trid1); 7000 ut_init_trid2(&trid2); 7001 TAILQ_INIT(&ctrlr1.active_io_qpairs); 7002 TAILQ_INIT(&ctrlr2.active_io_qpairs); 7003 ctrlr1.cdata.cmic.multi_ctrlr = 1; 7004 ctrlr2.cdata.cmic.multi_ctrlr = 1; 7005 ctrlr1.cdata.cntlid = 1; 7006 ctrlr2.cdata.cntlid = 2; 7007 ctrlr1.adminq.is_connected = true; 7008 ctrlr2.adminq.is_connected = true; 7009 7010 set_thread(0); 7011 7012 rc = nvme_ctrlr_create(&ctrlr1, "nvme0", &trid1, NULL); 7013 CU_ASSERT(rc == 0); 7014 7015 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7016 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7017 7018 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1, UT_HOSTNQN); 7019 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 7020 7021 curr_trid1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 7022 SPDK_CU_ASSERT_FATAL(curr_trid1 != NULL); 7023 7024 ch11 = spdk_get_io_channel(nvme_ctrlr1); 7025 SPDK_CU_ASSERT_FATAL(ch11 != NULL); 7026 7027 ctrlr_ch11 = spdk_io_channel_get_ctx(ch11); 7028 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7029 7030 set_thread(1); 7031 7032 ch12 = spdk_get_io_channel(nvme_ctrlr1); 7033 SPDK_CU_ASSERT_FATAL(ch12 != NULL); 7034 7035 ctrlr_ch12 = spdk_io_channel_get_ctx(ch12); 7036 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7037 7038 set_thread(0); 7039 7040 rc = nvme_ctrlr_create(&ctrlr2, "nvme0", &trid2, NULL); 7041 CU_ASSERT(rc == 0); 7042 7043 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2, UT_HOSTNQN); 7044 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 7045 7046 curr_trid2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 7047 SPDK_CU_ASSERT_FATAL(curr_trid2 != NULL); 7048 7049 ch21 = spdk_get_io_channel(nvme_ctrlr2); 7050 SPDK_CU_ASSERT_FATAL(ch21 != NULL); 7051 7052 ctrlr_ch21 = spdk_io_channel_get_ctx(ch21); 7053 CU_ASSERT(ctrlr_ch21->qpair != NULL); 7054 7055 set_thread(1); 7056 7057 ch22 = spdk_get_io_channel(nvme_ctrlr2); 7058 SPDK_CU_ASSERT_FATAL(ch22 != NULL); 7059 7060 ctrlr_ch22 = spdk_io_channel_get_ctx(ch22); 7061 CU_ASSERT(ctrlr_ch22->qpair != NULL); 7062 7063 /* Reset starts from thread 1. */ 7064 set_thread(1); 7065 7066 nvme_ctrlr1->resetting = false; 7067 nvme_ctrlr2->resetting = false; 7068 curr_trid1->last_failed_tsc = spdk_get_ticks(); 7069 curr_trid2->last_failed_tsc = spdk_get_ticks(); 7070 ctrlr_op_rc = -1; 7071 7072 nvme_bdev_ctrlr_op_rpc(nbdev_ctrlr, NVME_CTRLR_OP_RESET, 7073 ut_ctrlr_op_rpc_cb, &ctrlr_op_rc); 7074 7075 CU_ASSERT(nvme_ctrlr1->resetting == true); 7076 CU_ASSERT(ctrlr_ch11->qpair != NULL); 7077 CU_ASSERT(ctrlr_ch12->qpair != NULL); 7078 CU_ASSERT(nvme_ctrlr2->resetting == false); 7079 7080 poll_thread_times(0, 3); 7081 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7082 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7083 7084 poll_thread_times(0, 1); 7085 poll_thread_times(1, 1); 7086 CU_ASSERT(ctrlr_ch11->qpair->qpair == NULL); 7087 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7088 7089 poll_thread_times(1, 1); 7090 poll_thread_times(0, 1); 7091 CU_ASSERT(ctrlr1.adminq.is_connected == false); 7092 7093 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7094 poll_thread_times(0, 2); 7095 CU_ASSERT(ctrlr1.adminq.is_connected == true); 7096 7097 poll_thread_times(0, 1); 7098 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7099 CU_ASSERT(ctrlr_ch12->qpair->qpair == NULL); 7100 7101 poll_thread_times(1, 1); 7102 CU_ASSERT(ctrlr_ch11->qpair->qpair != NULL); 7103 CU_ASSERT(ctrlr_ch12->qpair->qpair != NULL); 7104 CU_ASSERT(nvme_ctrlr1->resetting == true); 7105 CU_ASSERT(curr_trid1->last_failed_tsc != 0); 7106 7107 poll_thread_times(0, 2); 7108 poll_thread_times(1, 1); 7109 poll_thread_times(0, 1); 7110 poll_thread_times(1, 1); 7111 poll_thread_times(0, 1); 7112 poll_thread_times(1, 1); 7113 poll_thread_times(0, 1); 7114 7115 CU_ASSERT(nvme_ctrlr1->resetting == false); 7116 CU_ASSERT(curr_trid1->last_failed_tsc == 0); 7117 CU_ASSERT(nvme_ctrlr2->resetting == true); 7118 7119 poll_threads(); 7120 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7121 poll_threads(); 7122 7123 CU_ASSERT(nvme_ctrlr2->resetting == false); 7124 CU_ASSERT(ctrlr_op_rc == 0); 7125 7126 set_thread(1); 7127 7128 spdk_put_io_channel(ch12); 7129 spdk_put_io_channel(ch22); 7130 7131 set_thread(0); 7132 7133 spdk_put_io_channel(ch11); 7134 spdk_put_io_channel(ch21); 7135 7136 poll_threads(); 7137 7138 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7139 CU_ASSERT(rc == 0); 7140 7141 poll_threads(); 7142 spdk_delay_us(1000); 7143 poll_threads(); 7144 7145 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 7146 } 7147 7148 static void 7149 test_disable_enable_ctrlr(void) 7150 { 7151 struct spdk_nvme_transport_id trid = {}; 7152 struct spdk_nvme_ctrlr ctrlr = {}; 7153 struct nvme_ctrlr *nvme_ctrlr = NULL; 7154 struct nvme_path_id *curr_trid; 7155 struct spdk_io_channel *ch1, *ch2; 7156 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 7157 int rc; 7158 7159 ut_init_trid(&trid); 7160 TAILQ_INIT(&ctrlr.active_io_qpairs); 7161 ctrlr.adminq.is_connected = true; 7162 7163 set_thread(0); 7164 7165 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7166 CU_ASSERT(rc == 0); 7167 7168 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7169 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7170 7171 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 7172 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 7173 7174 ch1 = spdk_get_io_channel(nvme_ctrlr); 7175 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7176 7177 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 7178 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7179 7180 set_thread(1); 7181 7182 ch2 = spdk_get_io_channel(nvme_ctrlr); 7183 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7184 7185 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 7186 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7187 7188 /* Disable starts from thread 1. */ 7189 set_thread(1); 7190 7191 /* Case 1: ctrlr is already disabled. */ 7192 nvme_ctrlr->disabled = true; 7193 7194 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7195 CU_ASSERT(rc == -EALREADY); 7196 7197 /* Case 2: ctrlr is already being destructed. */ 7198 nvme_ctrlr->disabled = false; 7199 nvme_ctrlr->destruct = true; 7200 7201 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7202 CU_ASSERT(rc == -ENXIO); 7203 7204 /* Case 3: reset is in progress. */ 7205 nvme_ctrlr->destruct = false; 7206 nvme_ctrlr->resetting = true; 7207 7208 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7209 CU_ASSERT(rc == -EBUSY); 7210 7211 /* Case 4: disable completes successfully. */ 7212 nvme_ctrlr->resetting = false; 7213 7214 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7215 CU_ASSERT(rc == 0); 7216 CU_ASSERT(nvme_ctrlr->resetting == true); 7217 CU_ASSERT(ctrlr_ch1->qpair != NULL); 7218 CU_ASSERT(ctrlr_ch2->qpair != NULL); 7219 7220 poll_thread_times(0, 3); 7221 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7222 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7223 7224 poll_thread_times(0, 1); 7225 poll_thread_times(1, 1); 7226 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7227 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7228 7229 poll_thread_times(1, 1); 7230 poll_thread_times(0, 1); 7231 CU_ASSERT(ctrlr.adminq.is_connected == false); 7232 poll_thread_times(1, 1); 7233 poll_thread_times(0, 1); 7234 poll_thread_times(1, 1); 7235 poll_thread_times(0, 1); 7236 CU_ASSERT(nvme_ctrlr->resetting == false); 7237 CU_ASSERT(nvme_ctrlr->disabled == true); 7238 7239 /* Case 5: enable completes successfully. */ 7240 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7241 CU_ASSERT(rc == 0); 7242 7243 CU_ASSERT(nvme_ctrlr->resetting == true); 7244 CU_ASSERT(nvme_ctrlr->disabled == false); 7245 7246 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7247 poll_thread_times(0, 2); 7248 CU_ASSERT(ctrlr.adminq.is_connected == true); 7249 7250 poll_thread_times(0, 1); 7251 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7252 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7253 7254 poll_thread_times(1, 1); 7255 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 7256 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 7257 CU_ASSERT(nvme_ctrlr->resetting == true); 7258 7259 poll_thread_times(0, 2); 7260 CU_ASSERT(nvme_ctrlr->resetting == true); 7261 poll_thread_times(1, 1); 7262 CU_ASSERT(nvme_ctrlr->resetting == true); 7263 poll_thread_times(0, 1); 7264 CU_ASSERT(nvme_ctrlr->resetting == false); 7265 7266 /* Case 6: ctrlr is already enabled. */ 7267 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7268 CU_ASSERT(rc == -EALREADY); 7269 7270 set_thread(0); 7271 7272 /* Case 7: disable cancels delayed reconnect. */ 7273 nvme_ctrlr->opts.reconnect_delay_sec = 10; 7274 ctrlr.fail_reset = true; 7275 7276 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7277 CU_ASSERT(rc == 0); 7278 7279 poll_threads(); 7280 7281 CU_ASSERT(nvme_ctrlr->resetting == false); 7282 CU_ASSERT(ctrlr.is_failed == false); 7283 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 7284 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 7285 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 7286 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 7287 7288 rc = bdev_nvme_disable_ctrlr(nvme_ctrlr); 7289 CU_ASSERT(rc == 0); 7290 7291 CU_ASSERT(nvme_ctrlr->resetting == true); 7292 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 7293 7294 poll_threads(); 7295 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7296 poll_threads(); 7297 7298 CU_ASSERT(nvme_ctrlr->resetting == false); 7299 CU_ASSERT(nvme_ctrlr->disabled == true); 7300 7301 rc = bdev_nvme_enable_ctrlr(nvme_ctrlr); 7302 CU_ASSERT(rc == 0); 7303 7304 CU_ASSERT(nvme_ctrlr->resetting == true); 7305 CU_ASSERT(nvme_ctrlr->disabled == false); 7306 7307 poll_threads(); 7308 7309 CU_ASSERT(nvme_ctrlr->resetting == false); 7310 7311 set_thread(1); 7312 7313 spdk_put_io_channel(ch2); 7314 7315 set_thread(0); 7316 7317 spdk_put_io_channel(ch1); 7318 7319 poll_threads(); 7320 7321 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7322 CU_ASSERT(rc == 0); 7323 7324 poll_threads(); 7325 spdk_delay_us(1000); 7326 poll_threads(); 7327 7328 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7329 } 7330 7331 static void 7332 ut_delete_done(void *ctx, int rc) 7333 { 7334 int *delete_done_rc = ctx; 7335 *delete_done_rc = rc; 7336 } 7337 7338 static void 7339 test_delete_ctrlr_done(void) 7340 { 7341 struct spdk_nvme_transport_id trid = {}; 7342 struct spdk_nvme_ctrlr ctrlr = {}; 7343 int delete_done_rc = 0xDEADBEEF; 7344 int rc; 7345 7346 ut_init_trid(&trid); 7347 7348 nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 7349 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 7350 7351 rc = bdev_nvme_delete("nvme0", &g_any_path, ut_delete_done, &delete_done_rc); 7352 CU_ASSERT(rc == 0); 7353 7354 for (int i = 0; i < 20; i++) { 7355 poll_threads(); 7356 if (delete_done_rc == 0) { 7357 break; 7358 } 7359 spdk_delay_us(1000); 7360 } 7361 7362 CU_ASSERT(delete_done_rc == 0); 7363 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7364 } 7365 7366 static void 7367 test_ns_remove_during_reset(void) 7368 { 7369 struct nvme_path_id path = {}; 7370 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 7371 struct spdk_nvme_ctrlr *ctrlr; 7372 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 7373 struct nvme_bdev_ctrlr *nbdev_ctrlr; 7374 struct nvme_ctrlr *nvme_ctrlr; 7375 const int STRING_SIZE = 32; 7376 const char *attached_names[STRING_SIZE]; 7377 struct nvme_bdev *bdev; 7378 struct nvme_ns *nvme_ns; 7379 union spdk_nvme_async_event_completion event = {}; 7380 struct spdk_nvme_cpl cpl = {}; 7381 int rc; 7382 7383 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 7384 ut_init_trid(&path.trid); 7385 7386 set_thread(0); 7387 7388 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 7389 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 7390 7391 g_ut_attach_ctrlr_status = 0; 7392 g_ut_attach_bdev_count = 1; 7393 7394 rc = spdk_bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 7395 attach_ctrlr_done, NULL, &dopts, &opts, false); 7396 CU_ASSERT(rc == 0); 7397 7398 spdk_delay_us(1000); 7399 poll_threads(); 7400 7401 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 7402 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 7403 7404 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid, dopts.hostnqn); 7405 CU_ASSERT(nvme_ctrlr != NULL); 7406 7407 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 7408 CU_ASSERT(bdev != NULL); 7409 7410 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 7411 CU_ASSERT(nvme_ns != NULL); 7412 7413 /* If ns is removed during ctrlr reset, nvme_ns and bdev should still exist, 7414 * but nvme_ns->ns should be NULL. 7415 */ 7416 7417 CU_ASSERT(ctrlr->ns[0].is_active == true); 7418 ctrlr->ns[0].is_active = false; 7419 7420 rc = bdev_nvme_reset_ctrlr(nvme_ctrlr); 7421 CU_ASSERT(rc == 0); 7422 7423 poll_threads(); 7424 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7425 poll_threads(); 7426 7427 CU_ASSERT(nvme_ctrlr->resetting == false); 7428 CU_ASSERT(ctrlr->adminq.is_connected == true); 7429 7430 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7431 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7432 CU_ASSERT(nvme_ns->bdev == bdev); 7433 CU_ASSERT(nvme_ns->ns == NULL); 7434 7435 /* Then, async event should fill nvme_ns->ns again. */ 7436 7437 ctrlr->ns[0].is_active = true; 7438 7439 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 7440 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 7441 cpl.cdw0 = event.raw; 7442 7443 aer_cb(nvme_ctrlr, &cpl); 7444 7445 CU_ASSERT(nvme_ns == nvme_ctrlr_get_first_active_ns(nvme_ctrlr)); 7446 CU_ASSERT(bdev == nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1)); 7447 CU_ASSERT(nvme_ns->bdev == bdev); 7448 CU_ASSERT(nvme_ns->ns == &ctrlr->ns[0]); 7449 7450 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7451 CU_ASSERT(rc == 0); 7452 7453 poll_threads(); 7454 spdk_delay_us(1000); 7455 poll_threads(); 7456 7457 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7458 } 7459 7460 static void 7461 test_io_path_is_current(void) 7462 { 7463 struct nvme_bdev_channel nbdev_ch = { 7464 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 7465 }; 7466 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 7467 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 7468 struct spdk_nvme_ns ns1 = {}, ns2 = {}, ns3 = {}; 7469 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }, 7470 nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 7471 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}, ctrlr_ch3 = {}; 7472 struct nvme_qpair nvme_qpair1 = { .qpair = &qpair1, .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 7473 struct nvme_qpair nvme_qpair2 = { .qpair = &qpair2, .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 7474 struct nvme_qpair nvme_qpair3 = { .qpair = &qpair3, .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, }; 7475 struct nvme_ns nvme_ns1 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns1, }; 7476 struct nvme_ns nvme_ns2 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns2, }; 7477 struct nvme_ns nvme_ns3 = { .ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE, .ns = &ns3, }; 7478 struct nvme_io_path io_path1 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 7479 struct nvme_io_path io_path2 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 7480 struct nvme_io_path io_path3 = { .nbdev_ch = &nbdev_ch, .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 7481 7482 /* io_path1 is deleting */ 7483 io_path1.nbdev_ch = NULL; 7484 7485 CU_ASSERT(nvme_io_path_is_current(&io_path1) == false); 7486 7487 io_path1.nbdev_ch = &nbdev_ch; 7488 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 7489 io_path2.nbdev_ch = &nbdev_ch; 7490 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 7491 io_path3.nbdev_ch = &nbdev_ch; 7492 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 7493 7494 /* active/active: io_path is current if it is available and ANA optimized. */ 7495 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7496 7497 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7498 7499 /* active/active: io_path is not current if it is disconnected even if it is 7500 * ANA optimized. 7501 */ 7502 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7503 7504 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7505 7506 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 7507 7508 /* active/passive: io_path is current if it is available and cached. 7509 * (only ANA optimized path is cached for active/passive.) 7510 */ 7511 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7512 nbdev_ch.current_io_path = &io_path2; 7513 7514 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7515 7516 /* active:passive: io_path is not current if it is disconnected even if it is cached */ 7517 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7518 7519 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7520 7521 qpair2.failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 7522 7523 /* active/active and active/passive: io_path is not current if it is ANA inaccessible. */ 7524 nvme_ns2.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 7525 7526 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7527 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7528 7529 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7530 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7531 7532 /* active/active: non-optimized path is current only if there is no optimized path. */ 7533 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE; 7534 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7535 7536 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7537 7538 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7539 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 7540 7541 CU_ASSERT(nvme_io_path_is_current(&io_path2) == true); 7542 7543 /* active/passive: current is true if it is the first one when there is no optimized path. */ 7544 nbdev_ch.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE; 7545 nbdev_ch.current_io_path = NULL; 7546 7547 CU_ASSERT(nvme_io_path_is_current(&io_path1) == true); 7548 CU_ASSERT(nvme_io_path_is_current(&io_path2) == false); 7549 CU_ASSERT(nvme_io_path_is_current(&io_path3) == false); 7550 } 7551 7552 static void 7553 test_bdev_reset_abort_io(void) 7554 { 7555 struct spdk_nvme_transport_id trid = {}; 7556 struct spdk_bdev_nvme_ctrlr_opts opts = {}; 7557 struct spdk_nvme_ctrlr *ctrlr; 7558 struct spdk_nvme_ctrlr_opts dopts = {.hostnqn = UT_HOSTNQN}; 7559 struct nvme_ctrlr *nvme_ctrlr; 7560 const int STRING_SIZE = 32; 7561 const char *attached_names[STRING_SIZE]; 7562 struct nvme_bdev *bdev; 7563 struct spdk_bdev_io *write_io, *read_io, *reset_io; 7564 struct spdk_io_channel *ch1, *ch2; 7565 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 7566 struct nvme_io_path *io_path1, *io_path2; 7567 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 7568 int rc; 7569 7570 g_opts.bdev_retry_count = -1; 7571 7572 ut_init_trid(&trid); 7573 7574 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 7575 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 7576 7577 g_ut_attach_ctrlr_status = 0; 7578 g_ut_attach_bdev_count = 1; 7579 7580 set_thread(1); 7581 7582 opts.ctrlr_loss_timeout_sec = -1; 7583 opts.reconnect_delay_sec = 1; 7584 7585 rc = spdk_bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 7586 attach_ctrlr_done, NULL, &dopts, &opts, false); 7587 CU_ASSERT(rc == 0); 7588 7589 spdk_delay_us(1000); 7590 poll_threads(); 7591 7592 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 7593 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 7594 7595 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 7596 SPDK_CU_ASSERT_FATAL(bdev != NULL); 7597 7598 set_thread(0); 7599 7600 ch1 = spdk_get_io_channel(bdev); 7601 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 7602 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 7603 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 7604 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 7605 nvme_qpair1 = io_path1->qpair; 7606 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 7607 7608 set_thread(1); 7609 7610 ch2 = spdk_get_io_channel(bdev); 7611 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 7612 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 7613 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 7614 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 7615 nvme_qpair2 = io_path2->qpair; 7616 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 7617 7618 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch1); 7619 ut_bdev_io_set_buf(write_io); 7620 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 7621 7622 read_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_READ, bdev, ch1); 7623 ut_bdev_io_set_buf(read_io); 7624 read_io->internal.ch = (struct spdk_bdev_channel *)ch1; 7625 7626 reset_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 7627 7628 /* If qpair is disconnected, it is freed and then reconnected via resetting 7629 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 7630 * while resetting the nvme_ctrlr. 7631 */ 7632 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 7633 7634 poll_thread_times(0, 3); 7635 7636 CU_ASSERT(nvme_qpair1->qpair == NULL); 7637 CU_ASSERT(nvme_ctrlr->resetting == true); 7638 7639 set_thread(0); 7640 7641 write_io->internal.f.in_submit_request = true; 7642 7643 bdev_nvme_submit_request(ch1, write_io); 7644 7645 CU_ASSERT(write_io->internal.f.in_submit_request == true); 7646 CU_ASSERT(write_io == spdk_bdev_io_from_ctx(TAILQ_FIRST(&nbdev_ch1->retry_io_list))); 7647 7648 set_thread(1); 7649 7650 /* Submit a reset request to a bdev while resetting a nvme_ctrlr. 7651 * Further I/O queueing should be disabled and queued I/Os should be aborted. 7652 * Verify these behaviors. 7653 */ 7654 reset_io->internal.f.in_submit_request = true; 7655 7656 bdev_nvme_submit_request(ch2, reset_io); 7657 7658 poll_thread_times(0, 1); 7659 poll_thread_times(1, 2); 7660 7661 CU_ASSERT(nbdev_ch1->resetting == true); 7662 7663 /* qpair1 should be still disconnected. */ 7664 CU_ASSERT(nvme_qpair1->qpair == NULL); 7665 7666 set_thread(0); 7667 7668 read_io->internal.f.in_submit_request = true; 7669 7670 bdev_nvme_submit_request(ch1, read_io); 7671 7672 CU_ASSERT(nvme_qpair1->qpair == NULL); 7673 7674 poll_thread_times(0, 1); 7675 7676 /* The I/O which was submitted during bdev_reset should fail immediately. */ 7677 CU_ASSERT(read_io->internal.f.in_submit_request == false); 7678 CU_ASSERT(read_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 7679 7680 poll_threads(); 7681 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 7682 poll_threads(); 7683 7684 /* The completion of bdev_reset should ensure queued I/O is aborted. */ 7685 CU_ASSERT(write_io->internal.f.in_submit_request == false); 7686 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 7687 7688 /* The reset request itself should complete with success. */ 7689 CU_ASSERT(reset_io->internal.f.in_submit_request == false); 7690 CU_ASSERT(reset_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 7691 7692 set_thread(0); 7693 7694 spdk_put_io_channel(ch1); 7695 7696 set_thread(1); 7697 7698 spdk_put_io_channel(ch2); 7699 7700 poll_threads(); 7701 7702 set_thread(0); 7703 7704 rc = bdev_nvme_delete("nvme0", &g_any_path, NULL, NULL); 7705 CU_ASSERT(rc == 0); 7706 7707 poll_threads(); 7708 spdk_delay_us(1000); 7709 poll_threads(); 7710 7711 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 7712 7713 free(write_io); 7714 free(read_io); 7715 free(reset_io); 7716 7717 g_opts.bdev_retry_count = 0; 7718 } 7719 7720 int 7721 main(int argc, char **argv) 7722 { 7723 CU_pSuite suite = NULL; 7724 unsigned int num_failures; 7725 7726 CU_initialize_registry(); 7727 7728 suite = CU_add_suite("nvme", NULL, NULL); 7729 7730 CU_ADD_TEST(suite, test_create_ctrlr); 7731 CU_ADD_TEST(suite, test_reset_ctrlr); 7732 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 7733 CU_ADD_TEST(suite, test_failover_ctrlr); 7734 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 7735 CU_ADD_TEST(suite, test_pending_reset); 7736 CU_ADD_TEST(suite, test_attach_ctrlr); 7737 CU_ADD_TEST(suite, test_aer_cb); 7738 CU_ADD_TEST(suite, test_submit_nvme_cmd); 7739 CU_ADD_TEST(suite, test_add_remove_trid); 7740 CU_ADD_TEST(suite, test_abort); 7741 CU_ADD_TEST(suite, test_get_io_qpair); 7742 CU_ADD_TEST(suite, test_bdev_unregister); 7743 CU_ADD_TEST(suite, test_compare_ns); 7744 CU_ADD_TEST(suite, test_init_ana_log_page); 7745 CU_ADD_TEST(suite, test_get_memory_domains); 7746 CU_ADD_TEST(suite, test_reconnect_qpair); 7747 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 7748 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 7749 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 7750 CU_ADD_TEST(suite, test_admin_path); 7751 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 7752 CU_ADD_TEST(suite, test_find_io_path); 7753 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 7754 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 7755 CU_ADD_TEST(suite, test_retry_io_count); 7756 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 7757 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 7758 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 7759 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 7760 CU_ADD_TEST(suite, test_reconnect_ctrlr); 7761 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 7762 CU_ADD_TEST(suite, test_fail_path); 7763 CU_ADD_TEST(suite, test_nvme_ns_cmp); 7764 CU_ADD_TEST(suite, test_ana_transition); 7765 CU_ADD_TEST(suite, test_set_preferred_path); 7766 CU_ADD_TEST(suite, test_find_next_io_path); 7767 CU_ADD_TEST(suite, test_find_io_path_min_qd); 7768 CU_ADD_TEST(suite, test_disable_auto_failback); 7769 CU_ADD_TEST(suite, test_set_multipath_policy); 7770 CU_ADD_TEST(suite, test_uuid_generation); 7771 CU_ADD_TEST(suite, test_retry_io_to_same_path); 7772 CU_ADD_TEST(suite, test_race_between_reset_and_disconnected); 7773 CU_ADD_TEST(suite, test_ctrlr_op_rpc); 7774 CU_ADD_TEST(suite, test_bdev_ctrlr_op_rpc); 7775 CU_ADD_TEST(suite, test_disable_enable_ctrlr); 7776 CU_ADD_TEST(suite, test_delete_ctrlr_done); 7777 CU_ADD_TEST(suite, test_ns_remove_during_reset); 7778 CU_ADD_TEST(suite, test_io_path_is_current); 7779 CU_ADD_TEST(suite, test_bdev_reset_abort_io); 7780 7781 allocate_threads(3); 7782 set_thread(0); 7783 bdev_nvme_library_init(); 7784 init_accel(); 7785 7786 num_failures = spdk_ut_run_tests(argc, argv, NULL); 7787 7788 set_thread(0); 7789 bdev_nvme_library_fini(); 7790 fini_accel(); 7791 free_threads(); 7792 7793 CU_cleanup_registry(); 7794 7795 return num_failures; 7796 } 7797