1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2021 Intel Corporation. 3 * All rights reserved. 4 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 #include "spdk/stdinc.h" 8 #include "spdk_cunit.h" 9 #include "spdk/thread.h" 10 #include "spdk/bdev_module.h" 11 #include "spdk/bdev_module.h" 12 13 #include "common/lib/ut_multithread.c" 14 15 #include "bdev/nvme/bdev_nvme.c" 16 17 #include "unit/lib/json_mock.c" 18 19 #include "bdev/nvme/bdev_mdns_client.c" 20 21 static void *g_accel_p = (void *)0xdeadbeaf; 22 23 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 24 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 25 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 26 spdk_nvme_remove_cb remove_cb), NULL); 27 28 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 29 enum spdk_nvme_transport_type trtype)); 30 31 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 32 NULL); 33 34 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 35 36 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 37 struct spdk_nvme_transport_id *trid), 0); 38 39 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 40 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 41 42 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 43 44 DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0); 45 DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf)); 46 47 DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int, 48 (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0); 49 50 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int); 51 52 DEFINE_STUB_V(spdk_jsonrpc_send_error_response, (struct spdk_jsonrpc_request *request, 53 int error_code, const char *msg)); 54 DEFINE_STUB(spdk_jsonrpc_begin_result, struct spdk_json_write_ctx *, 55 (struct spdk_jsonrpc_request *request), NULL); 56 DEFINE_STUB_V(spdk_jsonrpc_end_result, 57 (struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)); 58 59 DEFINE_STUB_V(spdk_nvme_transport_get_opts, (struct spdk_nvme_transport_opts *opts, 60 size_t opts_size)); 61 62 DEFINE_STUB(spdk_nvme_transport_set_opts, int, (const struct spdk_nvme_transport_opts *opts, 63 size_t opts_size), 0); 64 65 DEFINE_STUB(spdk_bdev_io_get_submit_tsc, uint64_t, (struct spdk_bdev_io *bdev_io), 0); 66 67 DEFINE_STUB_V(spdk_bdev_reset_io_stat, (struct spdk_bdev_io_stat *stat, 68 enum spdk_bdev_reset_stat_mode mode)); 69 DEFINE_STUB_V(spdk_bdev_add_io_stat, (struct spdk_bdev_io_stat *total, 70 struct spdk_bdev_io_stat *add)); 71 72 int 73 spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 74 struct spdk_memory_domain **domains, int array_size) 75 { 76 int i, min_array_size; 77 78 if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) { 79 min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size); 80 for (i = 0; i < min_array_size; i++) { 81 domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5; 82 } 83 } 84 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains); 85 86 return 0; 87 } 88 89 struct spdk_io_channel * 90 spdk_accel_get_io_channel(void) 91 { 92 return spdk_get_io_channel(g_accel_p); 93 } 94 95 void 96 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 97 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 98 { 99 /* Avoid warning that opts is used uninitialised */ 100 memset(opts, 0, opts_size); 101 } 102 103 DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *, 104 (struct spdk_nvme_ctrlr *ctrlr), NULL); 105 106 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 107 (const struct spdk_nvme_ctrlr *ctrlr), 0); 108 109 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 110 (struct spdk_nvme_ctrlr *ctrlr), NULL); 111 112 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 113 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 114 115 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 116 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 117 118 DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true); 119 120 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 121 122 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 123 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 124 125 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 126 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 127 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 128 129 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 130 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 131 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 132 133 DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name, 134 size_t *size), 0); 135 136 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 137 138 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 139 140 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 141 142 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 143 144 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 145 146 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 147 148 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 149 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 150 151 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 152 153 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 154 char *name, size_t *size), 0); 155 156 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 157 (struct spdk_nvme_ns *ns), 0); 158 159 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 160 (const struct spdk_nvme_ctrlr *ctrlr), 0); 161 162 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 163 (struct spdk_nvme_ns *ns), 0); 164 165 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 166 (struct spdk_nvme_ns *ns), 0); 167 168 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 169 (struct spdk_nvme_ns *ns), 0); 170 171 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 172 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 173 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 174 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 175 176 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 177 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 178 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 179 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 180 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 181 182 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 183 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 184 void *payload, uint32_t payload_size, uint64_t slba, 185 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 186 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 187 188 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 189 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 190 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 191 192 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 193 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 194 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 195 196 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 197 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 198 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 199 200 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 201 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 202 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 203 204 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 205 206 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 207 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 208 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 209 210 DEFINE_STUB(spdk_nvme_cpl_get_status_type_string, const char *, 211 (const struct spdk_nvme_status *status), NULL); 212 213 DEFINE_STUB(spdk_nvme_cpl_get_status_string, const char *, 214 (const struct spdk_nvme_status *status), NULL); 215 216 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 217 218 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 219 220 DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc)); 221 222 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 223 224 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 225 226 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 227 struct iovec *iov, 228 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 229 230 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr)); 231 232 struct ut_nvme_req { 233 uint16_t opc; 234 spdk_nvme_cmd_cb cb_fn; 235 void *cb_arg; 236 struct spdk_nvme_cpl cpl; 237 TAILQ_ENTRY(ut_nvme_req) tailq; 238 }; 239 240 struct spdk_nvme_ns { 241 struct spdk_nvme_ctrlr *ctrlr; 242 uint32_t id; 243 bool is_active; 244 struct spdk_uuid *uuid; 245 enum spdk_nvme_ana_state ana_state; 246 enum spdk_nvme_csi csi; 247 }; 248 249 struct spdk_nvme_qpair { 250 struct spdk_nvme_ctrlr *ctrlr; 251 uint8_t failure_reason; 252 bool is_connected; 253 bool in_completion_context; 254 bool delete_after_completion_context; 255 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 256 uint32_t num_outstanding_reqs; 257 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 258 struct spdk_nvme_poll_group *poll_group; 259 void *poll_group_tailq_head; 260 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 261 }; 262 263 struct spdk_nvme_ctrlr { 264 uint32_t num_ns; 265 struct spdk_nvme_ns *ns; 266 struct spdk_nvme_ns_data *nsdata; 267 struct spdk_nvme_qpair adminq; 268 struct spdk_nvme_ctrlr_data cdata; 269 bool attached; 270 bool is_failed; 271 bool fail_reset; 272 bool is_removed; 273 struct spdk_nvme_transport_id trid; 274 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 275 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 276 struct spdk_nvme_ctrlr_opts opts; 277 }; 278 279 struct spdk_nvme_poll_group { 280 void *ctx; 281 struct spdk_nvme_accel_fn_table accel_fn_table; 282 TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs; 283 TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs; 284 }; 285 286 struct spdk_nvme_probe_ctx { 287 struct spdk_nvme_transport_id trid; 288 void *cb_ctx; 289 spdk_nvme_attach_cb attach_cb; 290 struct spdk_nvme_ctrlr *init_ctrlr; 291 }; 292 293 uint32_t 294 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 295 { 296 uint32_t nsid; 297 298 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 299 if (ctrlr->ns[nsid - 1].is_active) { 300 return nsid; 301 } 302 } 303 304 return 0; 305 } 306 307 uint32_t 308 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 309 { 310 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 311 if (ctrlr->ns[nsid - 1].is_active) { 312 return nsid; 313 } 314 } 315 316 return 0; 317 } 318 319 uint32_t 320 spdk_nvme_qpair_get_num_outstanding_reqs(struct spdk_nvme_qpair *qpair) 321 { 322 return qpair->num_outstanding_reqs; 323 } 324 325 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 326 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 327 g_ut_attached_ctrlrs); 328 static int g_ut_attach_ctrlr_status; 329 static size_t g_ut_attach_bdev_count; 330 static int g_ut_register_bdev_status; 331 static struct spdk_bdev *g_ut_registered_bdev; 332 static uint16_t g_ut_cntlid; 333 static struct nvme_path_id g_any_path = {}; 334 335 static void 336 ut_init_trid(struct spdk_nvme_transport_id *trid) 337 { 338 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 339 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 340 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 341 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 342 } 343 344 static void 345 ut_init_trid2(struct spdk_nvme_transport_id *trid) 346 { 347 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 348 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 349 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 350 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 351 } 352 353 static void 354 ut_init_trid3(struct spdk_nvme_transport_id *trid) 355 { 356 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 357 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 358 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 359 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 360 } 361 362 static int 363 cmp_int(int a, int b) 364 { 365 return a - b; 366 } 367 368 int 369 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 370 const struct spdk_nvme_transport_id *trid2) 371 { 372 int cmp; 373 374 /* We assume trtype is TCP for now. */ 375 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 376 377 cmp = cmp_int(trid1->trtype, trid2->trtype); 378 if (cmp) { 379 return cmp; 380 } 381 382 cmp = strcasecmp(trid1->traddr, trid2->traddr); 383 if (cmp) { 384 return cmp; 385 } 386 387 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 388 if (cmp) { 389 return cmp; 390 } 391 392 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 393 if (cmp) { 394 return cmp; 395 } 396 397 cmp = strcmp(trid1->subnqn, trid2->subnqn); 398 if (cmp) { 399 return cmp; 400 } 401 402 return 0; 403 } 404 405 static struct spdk_nvme_ctrlr * 406 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 407 bool ana_reporting, bool multipath) 408 { 409 struct spdk_nvme_ctrlr *ctrlr; 410 uint32_t i; 411 412 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 413 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 414 /* There is a ctrlr whose trid matches. */ 415 return NULL; 416 } 417 } 418 419 ctrlr = calloc(1, sizeof(*ctrlr)); 420 if (ctrlr == NULL) { 421 return NULL; 422 } 423 424 ctrlr->attached = true; 425 ctrlr->adminq.ctrlr = ctrlr; 426 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 427 ctrlr->adminq.is_connected = true; 428 429 if (num_ns != 0) { 430 ctrlr->num_ns = num_ns; 431 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 432 if (ctrlr->ns == NULL) { 433 free(ctrlr); 434 return NULL; 435 } 436 437 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 438 if (ctrlr->nsdata == NULL) { 439 free(ctrlr->ns); 440 free(ctrlr); 441 return NULL; 442 } 443 444 for (i = 0; i < num_ns; i++) { 445 ctrlr->ns[i].id = i + 1; 446 ctrlr->ns[i].ctrlr = ctrlr; 447 ctrlr->ns[i].is_active = true; 448 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 449 ctrlr->nsdata[i].nsze = 1024; 450 ctrlr->nsdata[i].nmic.can_share = multipath; 451 } 452 453 ctrlr->cdata.nn = num_ns; 454 ctrlr->cdata.mnan = num_ns; 455 ctrlr->cdata.nanagrpid = num_ns; 456 } 457 458 ctrlr->cdata.cntlid = ++g_ut_cntlid; 459 ctrlr->cdata.cmic.multi_ctrlr = multipath; 460 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 461 ctrlr->trid = *trid; 462 TAILQ_INIT(&ctrlr->active_io_qpairs); 463 464 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 465 466 return ctrlr; 467 } 468 469 static void 470 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 471 { 472 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 473 474 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 475 free(ctrlr->nsdata); 476 free(ctrlr->ns); 477 free(ctrlr); 478 } 479 480 static int 481 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 482 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 483 { 484 struct ut_nvme_req *req; 485 486 req = calloc(1, sizeof(*req)); 487 if (req == NULL) { 488 return -ENOMEM; 489 } 490 491 req->opc = opc; 492 req->cb_fn = cb_fn; 493 req->cb_arg = cb_arg; 494 495 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 496 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 497 498 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 499 qpair->num_outstanding_reqs++; 500 501 return 0; 502 } 503 504 static struct ut_nvme_req * 505 ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg) 506 { 507 struct ut_nvme_req *req; 508 509 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 510 if (req->cb_arg == cb_arg) { 511 break; 512 } 513 } 514 515 return req; 516 } 517 518 static struct spdk_bdev_io * 519 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 520 struct spdk_io_channel *ch) 521 { 522 struct spdk_bdev_io *bdev_io; 523 524 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 525 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 526 bdev_io->type = type; 527 bdev_io->bdev = &nbdev->disk; 528 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 529 530 return bdev_io; 531 } 532 533 static void 534 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 535 { 536 bdev_io->u.bdev.iovs = &bdev_io->iov; 537 bdev_io->u.bdev.iovcnt = 1; 538 539 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 540 bdev_io->iov.iov_len = 4096; 541 } 542 543 static void 544 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 545 { 546 if (ctrlr->is_failed) { 547 free(ctrlr); 548 return; 549 } 550 551 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 552 if (probe_ctx->cb_ctx) { 553 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 554 } 555 556 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 557 558 if (probe_ctx->attach_cb) { 559 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 560 } 561 } 562 563 int 564 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 565 { 566 struct spdk_nvme_ctrlr *ctrlr, *tmp; 567 568 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 569 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 570 continue; 571 } 572 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 573 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 574 } 575 576 free(probe_ctx); 577 578 return 0; 579 } 580 581 struct spdk_nvme_probe_ctx * 582 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 583 const struct spdk_nvme_ctrlr_opts *opts, 584 spdk_nvme_attach_cb attach_cb) 585 { 586 struct spdk_nvme_probe_ctx *probe_ctx; 587 588 if (trid == NULL) { 589 return NULL; 590 } 591 592 probe_ctx = calloc(1, sizeof(*probe_ctx)); 593 if (probe_ctx == NULL) { 594 return NULL; 595 } 596 597 probe_ctx->trid = *trid; 598 probe_ctx->cb_ctx = (void *)opts; 599 probe_ctx->attach_cb = attach_cb; 600 601 return probe_ctx; 602 } 603 604 int 605 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 606 { 607 if (ctrlr->attached) { 608 ut_detach_ctrlr(ctrlr); 609 } 610 611 return 0; 612 } 613 614 int 615 spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx) 616 { 617 SPDK_CU_ASSERT_FATAL(ctx != NULL); 618 *(struct spdk_nvme_ctrlr **)ctx = ctrlr; 619 620 return 0; 621 } 622 623 int 624 spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx) 625 { 626 return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx); 627 } 628 629 void 630 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 631 { 632 memset(opts, 0, opts_size); 633 634 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 635 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 636 } 637 638 const struct spdk_nvme_ctrlr_data * 639 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 640 { 641 return &ctrlr->cdata; 642 } 643 644 uint32_t 645 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 646 { 647 return ctrlr->num_ns; 648 } 649 650 struct spdk_nvme_ns * 651 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 652 { 653 if (nsid < 1 || nsid > ctrlr->num_ns) { 654 return NULL; 655 } 656 657 return &ctrlr->ns[nsid - 1]; 658 } 659 660 bool 661 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 662 { 663 if (nsid < 1 || nsid > ctrlr->num_ns) { 664 return false; 665 } 666 667 return ctrlr->ns[nsid - 1].is_active; 668 } 669 670 union spdk_nvme_csts_register 671 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 672 { 673 union spdk_nvme_csts_register csts; 674 675 csts.raw = 0; 676 677 return csts; 678 } 679 680 union spdk_nvme_vs_register 681 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 682 { 683 union spdk_nvme_vs_register vs; 684 685 vs.raw = 0; 686 687 return vs; 688 } 689 690 struct spdk_nvme_qpair * 691 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 692 const struct spdk_nvme_io_qpair_opts *user_opts, 693 size_t opts_size) 694 { 695 struct spdk_nvme_qpair *qpair; 696 697 qpair = calloc(1, sizeof(*qpair)); 698 if (qpair == NULL) { 699 return NULL; 700 } 701 702 qpair->ctrlr = ctrlr; 703 TAILQ_INIT(&qpair->outstanding_reqs); 704 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 705 706 return qpair; 707 } 708 709 static void 710 nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair) 711 { 712 struct spdk_nvme_poll_group *group = qpair->poll_group; 713 714 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 715 716 qpair->poll_group_tailq_head = &group->connected_qpairs; 717 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 718 TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq); 719 } 720 721 static void 722 nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair) 723 { 724 struct spdk_nvme_poll_group *group = qpair->poll_group; 725 726 CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs); 727 728 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 729 TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq); 730 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 731 } 732 733 int 734 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 735 struct spdk_nvme_qpair *qpair) 736 { 737 if (qpair->is_connected) { 738 return -EISCONN; 739 } 740 741 qpair->is_connected = true; 742 qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE; 743 744 if (qpair->poll_group) { 745 nvme_poll_group_connect_qpair(qpair); 746 } 747 748 return 0; 749 } 750 751 void 752 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 753 { 754 if (!qpair->is_connected) { 755 return; 756 } 757 758 qpair->is_connected = false; 759 760 if (qpair->poll_group != NULL) { 761 nvme_poll_group_disconnect_qpair(qpair); 762 } 763 } 764 765 int 766 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 767 { 768 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 769 770 if (qpair->in_completion_context) { 771 qpair->delete_after_completion_context = true; 772 return 0; 773 } 774 775 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 776 777 if (qpair->poll_group != NULL) { 778 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 779 } 780 781 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 782 783 CU_ASSERT(qpair->num_outstanding_reqs == 0); 784 785 free(qpair); 786 787 return 0; 788 } 789 790 int 791 spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr) 792 { 793 if (ctrlr->fail_reset) { 794 ctrlr->is_failed = true; 795 return -EIO; 796 } 797 798 ctrlr->adminq.is_connected = true; 799 return 0; 800 } 801 802 void 803 spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr) 804 { 805 } 806 807 int 808 spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr) 809 { 810 if (ctrlr->is_removed) { 811 return -ENXIO; 812 } 813 814 ctrlr->adminq.is_connected = false; 815 ctrlr->is_failed = false; 816 817 return 0; 818 } 819 820 void 821 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 822 { 823 ctrlr->is_failed = true; 824 } 825 826 bool 827 spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr) 828 { 829 return ctrlr->is_failed; 830 } 831 832 spdk_nvme_qp_failure_reason 833 spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr) 834 { 835 return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq); 836 } 837 838 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 839 sizeof(uint32_t)) 840 static void 841 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 842 { 843 struct spdk_nvme_ana_page ana_hdr; 844 char _ana_desc[UT_ANA_DESC_SIZE]; 845 struct spdk_nvme_ana_group_descriptor *ana_desc; 846 struct spdk_nvme_ns *ns; 847 uint32_t i; 848 849 memset(&ana_hdr, 0, sizeof(ana_hdr)); 850 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 851 852 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 853 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 854 855 buf += sizeof(ana_hdr); 856 length -= sizeof(ana_hdr); 857 858 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 859 860 for (i = 0; i < ctrlr->num_ns; i++) { 861 ns = &ctrlr->ns[i]; 862 863 if (!ns->is_active) { 864 continue; 865 } 866 867 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 868 869 ana_desc->ana_group_id = ns->id; 870 ana_desc->num_of_nsid = 1; 871 ana_desc->ana_state = ns->ana_state; 872 ana_desc->nsid[0] = ns->id; 873 874 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 875 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 876 877 buf += UT_ANA_DESC_SIZE; 878 length -= UT_ANA_DESC_SIZE; 879 } 880 } 881 882 int 883 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 884 uint8_t log_page, uint32_t nsid, 885 void *payload, uint32_t payload_size, 886 uint64_t offset, 887 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 888 { 889 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 890 SPDK_CU_ASSERT_FATAL(offset == 0); 891 ut_create_ana_log_page(ctrlr, payload, payload_size); 892 } 893 894 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 895 cb_fn, cb_arg); 896 } 897 898 int 899 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 900 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 901 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 902 { 903 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 904 } 905 906 int 907 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 908 void *cmd_cb_arg, 909 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 910 { 911 struct ut_nvme_req *req = NULL, *abort_req; 912 913 if (qpair == NULL) { 914 qpair = &ctrlr->adminq; 915 } 916 917 abort_req = calloc(1, sizeof(*abort_req)); 918 if (abort_req == NULL) { 919 return -ENOMEM; 920 } 921 922 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 923 if (req->cb_arg == cmd_cb_arg) { 924 break; 925 } 926 } 927 928 if (req == NULL) { 929 free(abort_req); 930 return -ENOENT; 931 } 932 933 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 934 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 935 936 abort_req->opc = SPDK_NVME_OPC_ABORT; 937 abort_req->cb_fn = cb_fn; 938 abort_req->cb_arg = cb_arg; 939 940 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 941 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 942 abort_req->cpl.cdw0 = 0; 943 944 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 945 ctrlr->adminq.num_outstanding_reqs++; 946 947 return 0; 948 } 949 950 int32_t 951 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 952 { 953 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 954 } 955 956 uint32_t 957 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 958 { 959 return ns->id; 960 } 961 962 struct spdk_nvme_ctrlr * 963 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 964 { 965 return ns->ctrlr; 966 } 967 968 static inline struct spdk_nvme_ns_data * 969 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 970 { 971 return &ns->ctrlr->nsdata[ns->id - 1]; 972 } 973 974 const struct spdk_nvme_ns_data * 975 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 976 { 977 return _nvme_ns_get_data(ns); 978 } 979 980 uint64_t 981 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 982 { 983 return _nvme_ns_get_data(ns)->nsze; 984 } 985 986 const struct spdk_uuid * 987 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 988 { 989 return ns->uuid; 990 } 991 992 enum spdk_nvme_csi 993 spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns) { 994 return ns->csi; 995 } 996 997 int 998 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 999 void *metadata, uint64_t lba, uint32_t lba_count, 1000 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1001 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1002 { 1003 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1004 } 1005 1006 int 1007 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1008 void *buffer, void *metadata, uint64_t lba, 1009 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1010 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 1011 { 1012 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1013 } 1014 1015 int 1016 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1017 uint64_t lba, uint32_t lba_count, 1018 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1019 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1020 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1021 uint16_t apptag_mask, uint16_t apptag) 1022 { 1023 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1024 } 1025 1026 int 1027 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1028 uint64_t lba, uint32_t lba_count, 1029 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1030 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1031 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 1032 uint16_t apptag_mask, uint16_t apptag) 1033 { 1034 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1035 } 1036 1037 static bool g_ut_readv_ext_called; 1038 int 1039 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1040 uint64_t lba, uint32_t lba_count, 1041 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1042 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1043 spdk_nvme_req_next_sge_cb next_sge_fn, 1044 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1045 { 1046 g_ut_readv_ext_called = true; 1047 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 1048 } 1049 1050 static bool g_ut_writev_ext_called; 1051 int 1052 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1053 uint64_t lba, uint32_t lba_count, 1054 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1055 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1056 spdk_nvme_req_next_sge_cb next_sge_fn, 1057 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 1058 { 1059 g_ut_writev_ext_called = true; 1060 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 1061 } 1062 1063 int 1064 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1065 uint64_t lba, uint32_t lba_count, 1066 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 1067 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 1068 spdk_nvme_req_next_sge_cb next_sge_fn, 1069 void *metadata, uint16_t apptag_mask, uint16_t apptag) 1070 { 1071 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 1072 } 1073 1074 int 1075 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1076 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 1077 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1078 { 1079 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 1080 } 1081 1082 int 1083 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1084 uint64_t lba, uint32_t lba_count, 1085 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 1086 uint32_t io_flags) 1087 { 1088 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 1089 } 1090 1091 int 1092 spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 1093 const struct spdk_nvme_scc_source_range *ranges, 1094 uint16_t num_ranges, uint64_t dest_lba, 1095 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 1096 { 1097 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg); 1098 } 1099 1100 struct spdk_nvme_poll_group * 1101 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 1102 { 1103 struct spdk_nvme_poll_group *group; 1104 1105 group = calloc(1, sizeof(*group)); 1106 if (group == NULL) { 1107 return NULL; 1108 } 1109 1110 group->ctx = ctx; 1111 if (table != NULL) { 1112 group->accel_fn_table = *table; 1113 } 1114 TAILQ_INIT(&group->connected_qpairs); 1115 TAILQ_INIT(&group->disconnected_qpairs); 1116 1117 return group; 1118 } 1119 1120 int 1121 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1122 { 1123 if (!TAILQ_EMPTY(&group->connected_qpairs) || 1124 !TAILQ_EMPTY(&group->disconnected_qpairs)) { 1125 return -EBUSY; 1126 } 1127 1128 free(group); 1129 1130 return 0; 1131 } 1132 1133 spdk_nvme_qp_failure_reason 1134 spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair) 1135 { 1136 return qpair->failure_reason; 1137 } 1138 1139 int32_t 1140 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1141 uint32_t max_completions) 1142 { 1143 struct ut_nvme_req *req, *tmp; 1144 uint32_t num_completions = 0; 1145 1146 if (!qpair->is_connected) { 1147 return -ENXIO; 1148 } 1149 1150 qpair->in_completion_context = true; 1151 1152 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1153 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1154 qpair->num_outstanding_reqs--; 1155 1156 req->cb_fn(req->cb_arg, &req->cpl); 1157 1158 free(req); 1159 num_completions++; 1160 } 1161 1162 qpair->in_completion_context = false; 1163 if (qpair->delete_after_completion_context) { 1164 spdk_nvme_ctrlr_free_io_qpair(qpair); 1165 } 1166 1167 return num_completions; 1168 } 1169 1170 int64_t 1171 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1172 uint32_t completions_per_qpair, 1173 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1174 { 1175 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1176 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1177 1178 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1179 1180 if (disconnected_qpair_cb == NULL) { 1181 return -EINVAL; 1182 } 1183 1184 TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) { 1185 disconnected_qpair_cb(qpair, group->ctx); 1186 } 1187 1188 TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) { 1189 if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) { 1190 spdk_nvme_ctrlr_disconnect_io_qpair(qpair); 1191 /* Bump the number of completions so this counts as "busy" */ 1192 num_completions++; 1193 continue; 1194 } 1195 1196 local_completions = spdk_nvme_qpair_process_completions(qpair, 1197 completions_per_qpair); 1198 if (local_completions < 0 && error_reason == 0) { 1199 error_reason = local_completions; 1200 } else { 1201 num_completions += local_completions; 1202 assert(num_completions >= 0); 1203 } 1204 } 1205 1206 return error_reason ? error_reason : num_completions; 1207 } 1208 1209 int 1210 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1211 struct spdk_nvme_qpair *qpair) 1212 { 1213 CU_ASSERT(!qpair->is_connected); 1214 1215 qpair->poll_group = group; 1216 qpair->poll_group_tailq_head = &group->disconnected_qpairs; 1217 TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq); 1218 1219 return 0; 1220 } 1221 1222 int 1223 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1224 struct spdk_nvme_qpair *qpair) 1225 { 1226 CU_ASSERT(!qpair->is_connected); 1227 1228 if (qpair->poll_group == NULL) { 1229 return -ENOENT; 1230 } 1231 1232 CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs); 1233 1234 TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq); 1235 1236 qpair->poll_group = NULL; 1237 qpair->poll_group_tailq_head = NULL; 1238 1239 return 0; 1240 } 1241 1242 int 1243 spdk_bdev_register(struct spdk_bdev *bdev) 1244 { 1245 g_ut_registered_bdev = bdev; 1246 1247 return g_ut_register_bdev_status; 1248 } 1249 1250 void 1251 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1252 { 1253 int rc; 1254 1255 rc = bdev->fn_table->destruct(bdev->ctxt); 1256 1257 if (bdev == g_ut_registered_bdev) { 1258 g_ut_registered_bdev = NULL; 1259 } 1260 1261 if (rc <= 0 && cb_fn != NULL) { 1262 cb_fn(cb_arg, rc); 1263 } 1264 } 1265 1266 int 1267 spdk_bdev_open_ext(const char *bdev_name, bool write, 1268 spdk_bdev_event_cb_t event_cb, void *event_ctx, 1269 struct spdk_bdev_desc **desc) 1270 { 1271 if (g_ut_registered_bdev == NULL || 1272 strcmp(g_ut_registered_bdev->name, bdev_name) != 0) { 1273 return -ENODEV; 1274 } 1275 1276 *desc = (struct spdk_bdev_desc *)g_ut_registered_bdev; 1277 1278 return 0; 1279 } 1280 1281 struct spdk_bdev * 1282 spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc) 1283 { 1284 return (struct spdk_bdev *)desc; 1285 } 1286 1287 int 1288 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1289 { 1290 bdev->blockcnt = size; 1291 1292 return 0; 1293 } 1294 1295 struct spdk_io_channel * 1296 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1297 { 1298 return (struct spdk_io_channel *)bdev_io->internal.ch; 1299 } 1300 1301 void 1302 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1303 { 1304 bdev_io->internal.status = status; 1305 bdev_io->internal.in_submit_request = false; 1306 } 1307 1308 void 1309 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1310 { 1311 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1312 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1313 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1314 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1315 } else { 1316 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1317 } 1318 1319 bdev_io->internal.error.nvme.cdw0 = cdw0; 1320 bdev_io->internal.error.nvme.sct = sct; 1321 bdev_io->internal.error.nvme.sc = sc; 1322 1323 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1324 } 1325 1326 void 1327 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1328 { 1329 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1330 1331 ut_bdev_io_set_buf(bdev_io); 1332 1333 cb(ch, bdev_io, true); 1334 } 1335 1336 static void 1337 test_create_ctrlr(void) 1338 { 1339 struct spdk_nvme_transport_id trid = {}; 1340 struct spdk_nvme_ctrlr ctrlr = {}; 1341 int rc; 1342 1343 ut_init_trid(&trid); 1344 1345 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1346 CU_ASSERT(rc == 0); 1347 1348 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1349 1350 rc = bdev_nvme_delete("nvme0", &g_any_path); 1351 CU_ASSERT(rc == 0); 1352 1353 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1354 1355 poll_threads(); 1356 spdk_delay_us(1000); 1357 poll_threads(); 1358 1359 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1360 } 1361 1362 static void 1363 ut_check_hotplug_on_reset(void *cb_arg, bool success) 1364 { 1365 bool *detect_remove = cb_arg; 1366 1367 CU_ASSERT(success == false); 1368 SPDK_CU_ASSERT_FATAL(detect_remove != NULL); 1369 1370 *detect_remove = true; 1371 } 1372 1373 static void 1374 test_reset_ctrlr(void) 1375 { 1376 struct spdk_nvme_transport_id trid = {}; 1377 struct spdk_nvme_ctrlr ctrlr = {}; 1378 struct nvme_ctrlr *nvme_ctrlr = NULL; 1379 struct nvme_path_id *curr_trid; 1380 struct spdk_io_channel *ch1, *ch2; 1381 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1382 bool detect_remove; 1383 int rc; 1384 1385 ut_init_trid(&trid); 1386 TAILQ_INIT(&ctrlr.active_io_qpairs); 1387 1388 set_thread(0); 1389 1390 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1391 CU_ASSERT(rc == 0); 1392 1393 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1394 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1395 1396 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1397 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1398 1399 ch1 = spdk_get_io_channel(nvme_ctrlr); 1400 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1401 1402 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1403 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1404 1405 set_thread(1); 1406 1407 ch2 = spdk_get_io_channel(nvme_ctrlr); 1408 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1409 1410 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1411 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1412 1413 /* Reset starts from thread 1. */ 1414 set_thread(1); 1415 1416 /* Case 1: ctrlr is already being destructed. */ 1417 nvme_ctrlr->destruct = true; 1418 1419 rc = bdev_nvme_reset(nvme_ctrlr); 1420 CU_ASSERT(rc == -ENXIO); 1421 1422 /* Case 2: reset is in progress. */ 1423 nvme_ctrlr->destruct = false; 1424 nvme_ctrlr->resetting = true; 1425 1426 rc = bdev_nvme_reset(nvme_ctrlr); 1427 CU_ASSERT(rc == -EBUSY); 1428 1429 /* Case 3: reset completes successfully. */ 1430 nvme_ctrlr->resetting = false; 1431 curr_trid->is_failed = true; 1432 ctrlr.is_failed = true; 1433 1434 rc = bdev_nvme_reset(nvme_ctrlr); 1435 CU_ASSERT(rc == 0); 1436 CU_ASSERT(nvme_ctrlr->resetting == true); 1437 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1438 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1439 1440 poll_thread_times(0, 3); 1441 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1442 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1443 1444 poll_thread_times(0, 1); 1445 poll_thread_times(1, 1); 1446 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 1447 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1448 CU_ASSERT(ctrlr.is_failed == true); 1449 1450 poll_thread_times(1, 1); 1451 poll_thread_times(0, 1); 1452 CU_ASSERT(ctrlr.is_failed == false); 1453 CU_ASSERT(ctrlr.adminq.is_connected == false); 1454 1455 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1456 poll_thread_times(0, 2); 1457 CU_ASSERT(ctrlr.adminq.is_connected == true); 1458 1459 poll_thread_times(0, 1); 1460 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1461 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 1462 1463 poll_thread_times(1, 1); 1464 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 1465 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 1466 CU_ASSERT(nvme_ctrlr->resetting == true); 1467 CU_ASSERT(curr_trid->is_failed == true); 1468 1469 poll_thread_times(0, 2); 1470 CU_ASSERT(nvme_ctrlr->resetting == true); 1471 poll_thread_times(1, 1); 1472 CU_ASSERT(nvme_ctrlr->resetting == true); 1473 poll_thread_times(0, 1); 1474 CU_ASSERT(nvme_ctrlr->resetting == false); 1475 CU_ASSERT(curr_trid->is_failed == false); 1476 1477 /* Case 4: ctrlr is already removed. */ 1478 ctrlr.is_removed = true; 1479 1480 rc = bdev_nvme_reset(nvme_ctrlr); 1481 CU_ASSERT(rc == 0); 1482 1483 detect_remove = false; 1484 nvme_ctrlr->reset_cb_fn = ut_check_hotplug_on_reset; 1485 nvme_ctrlr->reset_cb_arg = &detect_remove; 1486 1487 poll_threads(); 1488 1489 CU_ASSERT(nvme_ctrlr->reset_cb_fn == NULL); 1490 CU_ASSERT(nvme_ctrlr->reset_cb_arg == NULL); 1491 CU_ASSERT(detect_remove == true); 1492 1493 ctrlr.is_removed = false; 1494 1495 spdk_put_io_channel(ch2); 1496 1497 set_thread(0); 1498 1499 spdk_put_io_channel(ch1); 1500 1501 poll_threads(); 1502 1503 rc = bdev_nvme_delete("nvme0", &g_any_path); 1504 CU_ASSERT(rc == 0); 1505 1506 poll_threads(); 1507 spdk_delay_us(1000); 1508 poll_threads(); 1509 1510 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1511 } 1512 1513 static void 1514 test_race_between_reset_and_destruct_ctrlr(void) 1515 { 1516 struct spdk_nvme_transport_id trid = {}; 1517 struct spdk_nvme_ctrlr ctrlr = {}; 1518 struct nvme_ctrlr *nvme_ctrlr; 1519 struct spdk_io_channel *ch1, *ch2; 1520 int rc; 1521 1522 ut_init_trid(&trid); 1523 TAILQ_INIT(&ctrlr.active_io_qpairs); 1524 1525 set_thread(0); 1526 1527 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 1528 CU_ASSERT(rc == 0); 1529 1530 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1531 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1532 1533 ch1 = spdk_get_io_channel(nvme_ctrlr); 1534 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1535 1536 set_thread(1); 1537 1538 ch2 = spdk_get_io_channel(nvme_ctrlr); 1539 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1540 1541 /* Reset starts from thread 1. */ 1542 set_thread(1); 1543 1544 rc = bdev_nvme_reset(nvme_ctrlr); 1545 CU_ASSERT(rc == 0); 1546 CU_ASSERT(nvme_ctrlr->resetting == true); 1547 1548 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1549 set_thread(0); 1550 1551 rc = bdev_nvme_delete("nvme0", &g_any_path); 1552 CU_ASSERT(rc == 0); 1553 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1554 CU_ASSERT(nvme_ctrlr->destruct == true); 1555 CU_ASSERT(nvme_ctrlr->resetting == true); 1556 1557 poll_threads(); 1558 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1559 poll_threads(); 1560 1561 /* Reset completed but ctrlr is not still destructed yet. */ 1562 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1563 CU_ASSERT(nvme_ctrlr->destruct == true); 1564 CU_ASSERT(nvme_ctrlr->resetting == false); 1565 1566 /* New reset request is rejected. */ 1567 rc = bdev_nvme_reset(nvme_ctrlr); 1568 CU_ASSERT(rc == -ENXIO); 1569 1570 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1571 * However there are two channels and destruct is not completed yet. 1572 */ 1573 poll_threads(); 1574 1575 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1576 1577 set_thread(0); 1578 1579 spdk_put_io_channel(ch1); 1580 1581 set_thread(1); 1582 1583 spdk_put_io_channel(ch2); 1584 1585 poll_threads(); 1586 spdk_delay_us(1000); 1587 poll_threads(); 1588 1589 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1590 } 1591 1592 static void 1593 test_failover_ctrlr(void) 1594 { 1595 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1596 struct spdk_nvme_ctrlr ctrlr = {}; 1597 struct nvme_ctrlr *nvme_ctrlr = NULL; 1598 struct nvme_path_id *curr_trid, *next_trid; 1599 struct spdk_io_channel *ch1, *ch2; 1600 int rc; 1601 1602 ut_init_trid(&trid1); 1603 ut_init_trid2(&trid2); 1604 TAILQ_INIT(&ctrlr.active_io_qpairs); 1605 1606 set_thread(0); 1607 1608 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1609 CU_ASSERT(rc == 0); 1610 1611 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1612 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1613 1614 ch1 = spdk_get_io_channel(nvme_ctrlr); 1615 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1616 1617 set_thread(1); 1618 1619 ch2 = spdk_get_io_channel(nvme_ctrlr); 1620 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1621 1622 /* First, test one trid case. */ 1623 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1624 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1625 1626 /* Failover starts from thread 1. */ 1627 set_thread(1); 1628 1629 /* Case 1: ctrlr is already being destructed. */ 1630 nvme_ctrlr->destruct = true; 1631 1632 rc = bdev_nvme_failover(nvme_ctrlr, false); 1633 CU_ASSERT(rc == -ENXIO); 1634 CU_ASSERT(curr_trid->is_failed == false); 1635 1636 /* Case 2: reset is in progress. */ 1637 nvme_ctrlr->destruct = false; 1638 nvme_ctrlr->resetting = true; 1639 1640 rc = bdev_nvme_failover(nvme_ctrlr, false); 1641 CU_ASSERT(rc == -EBUSY); 1642 1643 /* Case 3: reset completes successfully. */ 1644 nvme_ctrlr->resetting = false; 1645 1646 rc = bdev_nvme_failover(nvme_ctrlr, false); 1647 CU_ASSERT(rc == 0); 1648 1649 CU_ASSERT(nvme_ctrlr->resetting == true); 1650 CU_ASSERT(curr_trid->is_failed == true); 1651 1652 poll_threads(); 1653 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1654 poll_threads(); 1655 1656 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1657 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1658 1659 CU_ASSERT(nvme_ctrlr->resetting == false); 1660 CU_ASSERT(curr_trid->is_failed == false); 1661 1662 set_thread(0); 1663 1664 /* Second, test two trids case. */ 1665 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1666 CU_ASSERT(rc == 0); 1667 1668 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1669 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1670 CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id); 1671 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1672 1673 /* Failover starts from thread 1. */ 1674 set_thread(1); 1675 1676 /* Case 4: reset is in progress. */ 1677 nvme_ctrlr->resetting = true; 1678 1679 rc = bdev_nvme_failover(nvme_ctrlr, false); 1680 CU_ASSERT(rc == -EBUSY); 1681 1682 /* Case 5: failover completes successfully. */ 1683 nvme_ctrlr->resetting = false; 1684 1685 rc = bdev_nvme_failover(nvme_ctrlr, false); 1686 CU_ASSERT(rc == 0); 1687 1688 CU_ASSERT(nvme_ctrlr->resetting == true); 1689 1690 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1691 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1692 CU_ASSERT(next_trid != curr_trid); 1693 CU_ASSERT(next_trid == nvme_ctrlr->active_path_id); 1694 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1695 1696 poll_threads(); 1697 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1698 poll_threads(); 1699 1700 CU_ASSERT(nvme_ctrlr->resetting == false); 1701 1702 spdk_put_io_channel(ch2); 1703 1704 set_thread(0); 1705 1706 spdk_put_io_channel(ch1); 1707 1708 poll_threads(); 1709 1710 rc = bdev_nvme_delete("nvme0", &g_any_path); 1711 CU_ASSERT(rc == 0); 1712 1713 poll_threads(); 1714 spdk_delay_us(1000); 1715 poll_threads(); 1716 1717 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1718 } 1719 1720 /* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following. 1721 * 1722 * A nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was 1723 * disconnected and reset ctrlr failed repeatedly before starting failover from trid1 1724 * to trid2. While processing the failed reset, trid3 was added. trid1 should 1725 * have been active, i.e., the head of the list until the failover completed. 1726 * However trid3 was inserted to the head of the list by mistake. 1727 * 1728 * I/O qpairs have smaller polling period than admin qpair. When a connection is 1729 * detected, I/O qpair may detect the error earlier than admin qpair. I/O qpair error 1730 * invokes reset ctrlr and admin qpair error invokes failover ctrlr. Hence reset ctrlr 1731 * may be executed repeatedly before failover is executed. Hence this bug is real. 1732 * 1733 * The following test verifies the fix. 1734 */ 1735 static void 1736 test_race_between_failover_and_add_secondary_trid(void) 1737 { 1738 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1739 struct spdk_nvme_ctrlr ctrlr = {}; 1740 struct nvme_ctrlr *nvme_ctrlr = NULL; 1741 struct nvme_path_id *path_id1, *path_id2, *path_id3; 1742 struct spdk_io_channel *ch1, *ch2; 1743 int rc; 1744 1745 ut_init_trid(&trid1); 1746 ut_init_trid2(&trid2); 1747 ut_init_trid3(&trid3); 1748 TAILQ_INIT(&ctrlr.active_io_qpairs); 1749 1750 set_thread(0); 1751 1752 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 1753 CU_ASSERT(rc == 0); 1754 1755 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1756 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1757 1758 ch1 = spdk_get_io_channel(nvme_ctrlr); 1759 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1760 1761 set_thread(1); 1762 1763 ch2 = spdk_get_io_channel(nvme_ctrlr); 1764 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1765 1766 set_thread(0); 1767 1768 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1769 CU_ASSERT(rc == 0); 1770 1771 path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids); 1772 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 1773 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1774 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1775 path_id2 = TAILQ_NEXT(path_id1, link); 1776 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 1777 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1778 1779 ctrlr.fail_reset = true; 1780 1781 rc = bdev_nvme_reset(nvme_ctrlr); 1782 CU_ASSERT(rc == 0); 1783 1784 poll_threads(); 1785 1786 CU_ASSERT(path_id1->is_failed == true); 1787 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1788 1789 rc = bdev_nvme_reset(nvme_ctrlr); 1790 CU_ASSERT(rc == 0); 1791 1792 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 1793 CU_ASSERT(rc == 0); 1794 1795 CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids)); 1796 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 1797 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0); 1798 CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link)); 1799 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0); 1800 path_id3 = TAILQ_NEXT(path_id2, link); 1801 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 1802 CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0); 1803 1804 poll_threads(); 1805 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1806 poll_threads(); 1807 1808 spdk_put_io_channel(ch1); 1809 1810 set_thread(1); 1811 1812 spdk_put_io_channel(ch2); 1813 1814 poll_threads(); 1815 1816 set_thread(0); 1817 1818 rc = bdev_nvme_delete("nvme0", &g_any_path); 1819 CU_ASSERT(rc == 0); 1820 1821 poll_threads(); 1822 spdk_delay_us(1000); 1823 poll_threads(); 1824 1825 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1826 } 1827 1828 static void 1829 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1830 { 1831 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1832 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1833 } 1834 1835 static void 1836 test_pending_reset(void) 1837 { 1838 struct spdk_nvme_transport_id trid = {}; 1839 struct spdk_nvme_ctrlr *ctrlr; 1840 struct nvme_ctrlr *nvme_ctrlr = NULL; 1841 const int STRING_SIZE = 32; 1842 const char *attached_names[STRING_SIZE]; 1843 struct nvme_bdev *bdev; 1844 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1845 struct spdk_io_channel *ch1, *ch2; 1846 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1847 struct nvme_io_path *io_path1, *io_path2; 1848 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1849 int rc; 1850 1851 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1852 ut_init_trid(&trid); 1853 1854 set_thread(0); 1855 1856 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1857 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1858 1859 g_ut_attach_ctrlr_status = 0; 1860 g_ut_attach_bdev_count = 1; 1861 1862 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1863 attach_ctrlr_done, NULL, NULL, NULL, false); 1864 CU_ASSERT(rc == 0); 1865 1866 spdk_delay_us(1000); 1867 poll_threads(); 1868 1869 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1870 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1871 1872 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1873 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1874 1875 ch1 = spdk_get_io_channel(bdev); 1876 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1877 1878 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1879 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 1880 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 1881 ctrlr_ch1 = io_path1->qpair->ctrlr_ch; 1882 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1883 1884 set_thread(1); 1885 1886 ch2 = spdk_get_io_channel(bdev); 1887 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1888 1889 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1890 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 1891 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 1892 ctrlr_ch2 = io_path2->qpair->ctrlr_ch; 1893 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1894 1895 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1896 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1897 1898 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1899 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1900 1901 /* The first reset request is submitted on thread 1, and the second reset request 1902 * is submitted on thread 0 while processing the first request. 1903 */ 1904 bdev_nvme_submit_request(ch2, first_bdev_io); 1905 CU_ASSERT(nvme_ctrlr->resetting == true); 1906 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1907 1908 set_thread(0); 1909 1910 bdev_nvme_submit_request(ch1, second_bdev_io); 1911 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1912 1913 poll_threads(); 1914 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1915 poll_threads(); 1916 1917 CU_ASSERT(nvme_ctrlr->resetting == false); 1918 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1919 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1920 1921 /* The first reset request is submitted on thread 1, and the second reset request 1922 * is submitted on thread 0 while processing the first request. 1923 * 1924 * The difference from the above scenario is that the controller is removed while 1925 * processing the first request. Hence both reset requests should fail. 1926 */ 1927 set_thread(1); 1928 1929 bdev_nvme_submit_request(ch2, first_bdev_io); 1930 CU_ASSERT(nvme_ctrlr->resetting == true); 1931 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1932 1933 set_thread(0); 1934 1935 bdev_nvme_submit_request(ch1, second_bdev_io); 1936 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1937 1938 ctrlr->fail_reset = true; 1939 1940 poll_threads(); 1941 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1942 poll_threads(); 1943 1944 CU_ASSERT(nvme_ctrlr->resetting == false); 1945 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1946 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1947 1948 spdk_put_io_channel(ch1); 1949 1950 set_thread(1); 1951 1952 spdk_put_io_channel(ch2); 1953 1954 poll_threads(); 1955 1956 set_thread(0); 1957 1958 rc = bdev_nvme_delete("nvme0", &g_any_path); 1959 CU_ASSERT(rc == 0); 1960 1961 poll_threads(); 1962 spdk_delay_us(1000); 1963 poll_threads(); 1964 1965 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1966 1967 free(first_bdev_io); 1968 free(second_bdev_io); 1969 } 1970 1971 static void 1972 test_attach_ctrlr(void) 1973 { 1974 struct spdk_nvme_transport_id trid = {}; 1975 struct spdk_nvme_ctrlr *ctrlr; 1976 struct nvme_ctrlr *nvme_ctrlr; 1977 const int STRING_SIZE = 32; 1978 const char *attached_names[STRING_SIZE]; 1979 struct nvme_bdev *nbdev; 1980 int rc; 1981 1982 set_thread(0); 1983 1984 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1985 ut_init_trid(&trid); 1986 1987 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 1988 * by probe polling. 1989 */ 1990 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1991 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1992 1993 ctrlr->is_failed = true; 1994 g_ut_attach_ctrlr_status = -EIO; 1995 g_ut_attach_bdev_count = 0; 1996 1997 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 1998 attach_ctrlr_done, NULL, NULL, NULL, false); 1999 CU_ASSERT(rc == 0); 2000 2001 spdk_delay_us(1000); 2002 poll_threads(); 2003 2004 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2005 2006 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 2007 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 2008 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2009 2010 g_ut_attach_ctrlr_status = 0; 2011 2012 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2013 attach_ctrlr_done, NULL, NULL, NULL, false); 2014 CU_ASSERT(rc == 0); 2015 2016 spdk_delay_us(1000); 2017 poll_threads(); 2018 2019 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2020 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2021 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2022 2023 rc = bdev_nvme_delete("nvme0", &g_any_path); 2024 CU_ASSERT(rc == 0); 2025 2026 poll_threads(); 2027 spdk_delay_us(1000); 2028 poll_threads(); 2029 2030 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2031 2032 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 2033 * one nvme_bdev is created. 2034 */ 2035 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2036 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2037 2038 g_ut_attach_bdev_count = 1; 2039 2040 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2041 attach_ctrlr_done, NULL, NULL, NULL, false); 2042 CU_ASSERT(rc == 0); 2043 2044 spdk_delay_us(1000); 2045 poll_threads(); 2046 2047 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2048 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2049 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2050 2051 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2052 attached_names[0] = NULL; 2053 2054 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2055 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2056 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2057 2058 rc = bdev_nvme_delete("nvme0", &g_any_path); 2059 CU_ASSERT(rc == 0); 2060 2061 poll_threads(); 2062 spdk_delay_us(1000); 2063 poll_threads(); 2064 2065 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2066 2067 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 2068 * created because creating one nvme_bdev failed. 2069 */ 2070 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2071 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2072 2073 g_ut_register_bdev_status = -EINVAL; 2074 g_ut_attach_bdev_count = 0; 2075 2076 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2077 attach_ctrlr_done, NULL, NULL, NULL, false); 2078 CU_ASSERT(rc == 0); 2079 2080 spdk_delay_us(1000); 2081 poll_threads(); 2082 2083 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2084 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2085 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2086 2087 CU_ASSERT(attached_names[0] == NULL); 2088 2089 rc = bdev_nvme_delete("nvme0", &g_any_path); 2090 CU_ASSERT(rc == 0); 2091 2092 poll_threads(); 2093 spdk_delay_us(1000); 2094 poll_threads(); 2095 2096 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2097 2098 g_ut_register_bdev_status = 0; 2099 } 2100 2101 static void 2102 test_aer_cb(void) 2103 { 2104 struct spdk_nvme_transport_id trid = {}; 2105 struct spdk_nvme_ctrlr *ctrlr; 2106 struct nvme_ctrlr *nvme_ctrlr; 2107 struct nvme_bdev *bdev; 2108 const int STRING_SIZE = 32; 2109 const char *attached_names[STRING_SIZE]; 2110 union spdk_nvme_async_event_completion event = {}; 2111 struct spdk_nvme_cpl cpl = {}; 2112 int rc; 2113 2114 set_thread(0); 2115 2116 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2117 ut_init_trid(&trid); 2118 2119 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 2120 * namespaces are populated. 2121 */ 2122 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2123 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2124 2125 ctrlr->ns[0].is_active = false; 2126 2127 g_ut_attach_ctrlr_status = 0; 2128 g_ut_attach_bdev_count = 3; 2129 2130 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2131 attach_ctrlr_done, NULL, NULL, NULL, false); 2132 CU_ASSERT(rc == 0); 2133 2134 spdk_delay_us(1000); 2135 poll_threads(); 2136 2137 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2138 poll_threads(); 2139 2140 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2141 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2142 2143 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2144 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2145 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2146 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2147 2148 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2149 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2150 CU_ASSERT(bdev->disk.blockcnt == 1024); 2151 2152 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2153 * change the size of the 4th namespace. 2154 */ 2155 ctrlr->ns[0].is_active = true; 2156 ctrlr->ns[2].is_active = false; 2157 ctrlr->nsdata[3].nsze = 2048; 2158 2159 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2160 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2161 cpl.cdw0 = event.raw; 2162 2163 aer_cb(nvme_ctrlr, &cpl); 2164 2165 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2166 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2167 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2168 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2169 CU_ASSERT(bdev->disk.blockcnt == 2048); 2170 2171 /* Change ANA state of active namespaces. */ 2172 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2173 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2174 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2175 2176 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2177 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2178 cpl.cdw0 = event.raw; 2179 2180 aer_cb(nvme_ctrlr, &cpl); 2181 2182 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2183 poll_threads(); 2184 2185 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2186 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2187 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2188 2189 rc = bdev_nvme_delete("nvme0", &g_any_path); 2190 CU_ASSERT(rc == 0); 2191 2192 poll_threads(); 2193 spdk_delay_us(1000); 2194 poll_threads(); 2195 2196 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2197 } 2198 2199 static void 2200 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2201 enum spdk_bdev_io_type io_type) 2202 { 2203 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2204 struct nvme_io_path *io_path; 2205 struct spdk_nvme_qpair *qpair; 2206 2207 io_path = bdev_nvme_find_io_path(nbdev_ch); 2208 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2209 qpair = io_path->qpair->qpair; 2210 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2211 2212 bdev_io->type = io_type; 2213 bdev_io->internal.in_submit_request = true; 2214 2215 bdev_nvme_submit_request(ch, bdev_io); 2216 2217 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2218 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2219 2220 poll_threads(); 2221 2222 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2223 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2224 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2225 } 2226 2227 static void 2228 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2229 enum spdk_bdev_io_type io_type) 2230 { 2231 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2232 struct nvme_io_path *io_path; 2233 struct spdk_nvme_qpair *qpair; 2234 2235 io_path = bdev_nvme_find_io_path(nbdev_ch); 2236 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2237 qpair = io_path->qpair->qpair; 2238 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2239 2240 bdev_io->type = io_type; 2241 bdev_io->internal.in_submit_request = true; 2242 2243 bdev_nvme_submit_request(ch, bdev_io); 2244 2245 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2246 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2247 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2248 } 2249 2250 static void 2251 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2252 { 2253 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2254 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2255 struct ut_nvme_req *req; 2256 struct nvme_io_path *io_path; 2257 struct spdk_nvme_qpair *qpair; 2258 2259 io_path = bdev_nvme_find_io_path(nbdev_ch); 2260 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2261 qpair = io_path->qpair->qpair; 2262 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2263 2264 /* Only compare and write now. */ 2265 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2266 bdev_io->internal.in_submit_request = true; 2267 2268 bdev_nvme_submit_request(ch, bdev_io); 2269 2270 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2271 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2272 CU_ASSERT(bio->first_fused_submitted == true); 2273 2274 /* First outstanding request is compare operation. */ 2275 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2276 SPDK_CU_ASSERT_FATAL(req != NULL); 2277 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2278 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2279 2280 poll_threads(); 2281 2282 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2283 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2284 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2285 } 2286 2287 static void 2288 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2289 struct spdk_nvme_ctrlr *ctrlr) 2290 { 2291 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2292 bdev_io->internal.in_submit_request = true; 2293 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2294 2295 bdev_nvme_submit_request(ch, bdev_io); 2296 2297 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2298 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2299 2300 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2301 poll_thread_times(1, 1); 2302 2303 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2304 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2305 2306 poll_thread_times(0, 1); 2307 2308 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2309 } 2310 2311 static void 2312 test_submit_nvme_cmd(void) 2313 { 2314 struct spdk_nvme_transport_id trid = {}; 2315 struct spdk_nvme_ctrlr *ctrlr; 2316 struct nvme_ctrlr *nvme_ctrlr; 2317 const int STRING_SIZE = 32; 2318 const char *attached_names[STRING_SIZE]; 2319 struct nvme_bdev *bdev; 2320 struct spdk_bdev_io *bdev_io; 2321 struct spdk_io_channel *ch; 2322 int rc; 2323 2324 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2325 ut_init_trid(&trid); 2326 2327 set_thread(1); 2328 2329 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2330 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2331 2332 g_ut_attach_ctrlr_status = 0; 2333 g_ut_attach_bdev_count = 1; 2334 2335 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2336 attach_ctrlr_done, NULL, NULL, NULL, false); 2337 CU_ASSERT(rc == 0); 2338 2339 spdk_delay_us(1000); 2340 poll_threads(); 2341 2342 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2343 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2344 2345 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2346 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2347 2348 set_thread(0); 2349 2350 ch = spdk_get_io_channel(bdev); 2351 SPDK_CU_ASSERT_FATAL(ch != NULL); 2352 2353 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2354 2355 bdev_io->u.bdev.iovs = NULL; 2356 2357 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2358 2359 ut_bdev_io_set_buf(bdev_io); 2360 2361 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2362 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2363 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2364 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2365 2366 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2367 2368 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2369 2370 /* Verify that ext NVME API is called when data is described by memory domain */ 2371 g_ut_readv_ext_called = false; 2372 bdev_io->u.bdev.memory_domain = (void *)0xdeadbeef; 2373 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2374 CU_ASSERT(g_ut_readv_ext_called == true); 2375 g_ut_readv_ext_called = false; 2376 bdev_io->u.bdev.memory_domain = NULL; 2377 2378 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2379 2380 free(bdev_io); 2381 2382 spdk_put_io_channel(ch); 2383 2384 poll_threads(); 2385 2386 set_thread(1); 2387 2388 rc = bdev_nvme_delete("nvme0", &g_any_path); 2389 CU_ASSERT(rc == 0); 2390 2391 poll_threads(); 2392 spdk_delay_us(1000); 2393 poll_threads(); 2394 2395 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2396 } 2397 2398 static void 2399 test_add_remove_trid(void) 2400 { 2401 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2402 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2403 struct nvme_ctrlr *nvme_ctrlr = NULL; 2404 const int STRING_SIZE = 32; 2405 const char *attached_names[STRING_SIZE]; 2406 struct nvme_path_id *ctrid; 2407 int rc; 2408 2409 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2410 ut_init_trid(&path1.trid); 2411 ut_init_trid2(&path2.trid); 2412 ut_init_trid3(&path3.trid); 2413 2414 set_thread(0); 2415 2416 g_ut_attach_ctrlr_status = 0; 2417 g_ut_attach_bdev_count = 0; 2418 2419 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2420 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2421 2422 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2423 attach_ctrlr_done, NULL, NULL, NULL, false); 2424 CU_ASSERT(rc == 0); 2425 2426 spdk_delay_us(1000); 2427 poll_threads(); 2428 2429 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2430 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2431 2432 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2433 2434 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2435 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2436 2437 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2438 attach_ctrlr_done, NULL, NULL, NULL, false); 2439 CU_ASSERT(rc == 0); 2440 2441 spdk_delay_us(1000); 2442 poll_threads(); 2443 2444 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2445 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2446 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2447 break; 2448 } 2449 } 2450 CU_ASSERT(ctrid != NULL); 2451 2452 /* trid3 is not in the registered list. */ 2453 rc = bdev_nvme_delete("nvme0", &path3); 2454 CU_ASSERT(rc == -ENXIO); 2455 2456 /* trid2 is not used, and simply removed. */ 2457 rc = bdev_nvme_delete("nvme0", &path2); 2458 CU_ASSERT(rc == 0); 2459 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2460 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2461 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2462 } 2463 2464 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2465 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2466 2467 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2468 attach_ctrlr_done, NULL, NULL, NULL, false); 2469 CU_ASSERT(rc == 0); 2470 2471 spdk_delay_us(1000); 2472 poll_threads(); 2473 2474 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2475 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2476 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2477 break; 2478 } 2479 } 2480 CU_ASSERT(ctrid != NULL); 2481 2482 /* path1 is currently used and path3 is an alternative path. 2483 * If we remove path1, path is changed to path3. 2484 */ 2485 rc = bdev_nvme_delete("nvme0", &path1); 2486 CU_ASSERT(rc == 0); 2487 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2488 CU_ASSERT(nvme_ctrlr->resetting == true); 2489 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2490 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2491 } 2492 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2493 2494 poll_threads(); 2495 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2496 poll_threads(); 2497 2498 CU_ASSERT(nvme_ctrlr->resetting == false); 2499 2500 /* path3 is the current and only path. If we remove path3, the corresponding 2501 * nvme_ctrlr is removed. 2502 */ 2503 rc = bdev_nvme_delete("nvme0", &path3); 2504 CU_ASSERT(rc == 0); 2505 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2506 2507 poll_threads(); 2508 spdk_delay_us(1000); 2509 poll_threads(); 2510 2511 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2512 2513 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2514 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2515 2516 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2517 attach_ctrlr_done, NULL, NULL, NULL, false); 2518 CU_ASSERT(rc == 0); 2519 2520 spdk_delay_us(1000); 2521 poll_threads(); 2522 2523 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2524 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2525 2526 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2527 2528 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2529 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2530 2531 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2532 attach_ctrlr_done, NULL, NULL, NULL, false); 2533 CU_ASSERT(rc == 0); 2534 2535 spdk_delay_us(1000); 2536 poll_threads(); 2537 2538 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2539 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2540 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2541 break; 2542 } 2543 } 2544 CU_ASSERT(ctrid != NULL); 2545 2546 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2547 rc = bdev_nvme_delete("nvme0", &g_any_path); 2548 CU_ASSERT(rc == 0); 2549 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2550 2551 poll_threads(); 2552 spdk_delay_us(1000); 2553 poll_threads(); 2554 2555 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2556 } 2557 2558 static void 2559 test_abort(void) 2560 { 2561 struct spdk_nvme_transport_id trid = {}; 2562 struct nvme_ctrlr_opts opts = {}; 2563 struct spdk_nvme_ctrlr *ctrlr; 2564 struct nvme_ctrlr *nvme_ctrlr; 2565 const int STRING_SIZE = 32; 2566 const char *attached_names[STRING_SIZE]; 2567 struct nvme_bdev *bdev; 2568 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2569 struct spdk_io_channel *ch1, *ch2; 2570 struct nvme_bdev_channel *nbdev_ch1; 2571 struct nvme_io_path *io_path1; 2572 struct nvme_qpair *nvme_qpair1; 2573 int rc; 2574 2575 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2576 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2577 * are submitted on thread 1. Both should succeed. 2578 */ 2579 2580 ut_init_trid(&trid); 2581 2582 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2583 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2584 2585 g_ut_attach_ctrlr_status = 0; 2586 g_ut_attach_bdev_count = 1; 2587 2588 set_thread(1); 2589 2590 opts.ctrlr_loss_timeout_sec = -1; 2591 opts.reconnect_delay_sec = 1; 2592 2593 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2594 attach_ctrlr_done, NULL, NULL, &opts, false); 2595 CU_ASSERT(rc == 0); 2596 2597 spdk_delay_us(1000); 2598 poll_threads(); 2599 2600 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2601 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2602 2603 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2604 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2605 2606 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2607 ut_bdev_io_set_buf(write_io); 2608 2609 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2610 ut_bdev_io_set_buf(fuse_io); 2611 2612 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2613 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2614 2615 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2616 2617 set_thread(0); 2618 2619 ch1 = spdk_get_io_channel(bdev); 2620 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2621 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2622 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2623 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2624 nvme_qpair1 = io_path1->qpair; 2625 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2626 2627 set_thread(1); 2628 2629 ch2 = spdk_get_io_channel(bdev); 2630 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2631 2632 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2633 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2634 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2635 2636 /* Aborting the already completed request should fail. */ 2637 write_io->internal.in_submit_request = true; 2638 bdev_nvme_submit_request(ch1, write_io); 2639 poll_threads(); 2640 2641 CU_ASSERT(write_io->internal.in_submit_request == false); 2642 2643 abort_io->u.abort.bio_to_abort = write_io; 2644 abort_io->internal.in_submit_request = true; 2645 2646 bdev_nvme_submit_request(ch1, abort_io); 2647 2648 poll_threads(); 2649 2650 CU_ASSERT(abort_io->internal.in_submit_request == false); 2651 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2652 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2653 2654 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2655 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2656 2657 admin_io->internal.in_submit_request = true; 2658 bdev_nvme_submit_request(ch1, admin_io); 2659 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2660 poll_threads(); 2661 2662 CU_ASSERT(admin_io->internal.in_submit_request == false); 2663 2664 abort_io->u.abort.bio_to_abort = admin_io; 2665 abort_io->internal.in_submit_request = true; 2666 2667 bdev_nvme_submit_request(ch2, abort_io); 2668 2669 poll_threads(); 2670 2671 CU_ASSERT(abort_io->internal.in_submit_request == false); 2672 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2673 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2674 2675 /* Aborting the write request should succeed. */ 2676 write_io->internal.in_submit_request = true; 2677 bdev_nvme_submit_request(ch1, write_io); 2678 2679 CU_ASSERT(write_io->internal.in_submit_request == true); 2680 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2681 2682 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2683 abort_io->u.abort.bio_to_abort = write_io; 2684 abort_io->internal.in_submit_request = true; 2685 2686 bdev_nvme_submit_request(ch1, abort_io); 2687 2688 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2689 poll_threads(); 2690 2691 CU_ASSERT(abort_io->internal.in_submit_request == false); 2692 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2693 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2694 CU_ASSERT(write_io->internal.in_submit_request == false); 2695 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2696 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2697 2698 /* Aborting the fuse request should succeed. */ 2699 fuse_io->internal.in_submit_request = true; 2700 bdev_nvme_submit_request(ch1, fuse_io); 2701 2702 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2703 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2704 2705 abort_io->u.abort.bio_to_abort = fuse_io; 2706 abort_io->internal.in_submit_request = true; 2707 2708 bdev_nvme_submit_request(ch1, abort_io); 2709 2710 spdk_delay_us(10000); 2711 poll_threads(); 2712 2713 CU_ASSERT(abort_io->internal.in_submit_request == false); 2714 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2715 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2716 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2717 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2718 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2719 2720 /* Aborting the admin request should succeed. */ 2721 admin_io->internal.in_submit_request = true; 2722 bdev_nvme_submit_request(ch1, admin_io); 2723 2724 CU_ASSERT(admin_io->internal.in_submit_request == true); 2725 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2726 2727 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2728 abort_io->u.abort.bio_to_abort = admin_io; 2729 abort_io->internal.in_submit_request = true; 2730 2731 bdev_nvme_submit_request(ch2, abort_io); 2732 2733 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2734 poll_threads(); 2735 2736 CU_ASSERT(abort_io->internal.in_submit_request == false); 2737 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2738 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2739 CU_ASSERT(admin_io->internal.in_submit_request == false); 2740 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2741 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2742 2743 set_thread(0); 2744 2745 /* If qpair is disconnected, it is freed and then reconnected via resetting 2746 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2747 * while resetting the nvme_ctrlr. 2748 */ 2749 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2750 2751 poll_thread_times(0, 3); 2752 2753 CU_ASSERT(nvme_qpair1->qpair == NULL); 2754 CU_ASSERT(nvme_ctrlr->resetting == true); 2755 2756 write_io->internal.in_submit_request = true; 2757 2758 bdev_nvme_submit_request(ch1, write_io); 2759 2760 CU_ASSERT(write_io->internal.in_submit_request == true); 2761 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2762 2763 /* Aborting the queued write request should succeed immediately. */ 2764 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2765 abort_io->u.abort.bio_to_abort = write_io; 2766 abort_io->internal.in_submit_request = true; 2767 2768 bdev_nvme_submit_request(ch1, abort_io); 2769 2770 CU_ASSERT(abort_io->internal.in_submit_request == false); 2771 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2772 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2773 CU_ASSERT(write_io->internal.in_submit_request == false); 2774 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2775 2776 poll_threads(); 2777 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2778 poll_threads(); 2779 2780 spdk_put_io_channel(ch1); 2781 2782 set_thread(1); 2783 2784 spdk_put_io_channel(ch2); 2785 2786 poll_threads(); 2787 2788 free(write_io); 2789 free(fuse_io); 2790 free(admin_io); 2791 free(abort_io); 2792 2793 set_thread(1); 2794 2795 rc = bdev_nvme_delete("nvme0", &g_any_path); 2796 CU_ASSERT(rc == 0); 2797 2798 poll_threads(); 2799 spdk_delay_us(1000); 2800 poll_threads(); 2801 2802 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2803 } 2804 2805 static void 2806 test_get_io_qpair(void) 2807 { 2808 struct spdk_nvme_transport_id trid = {}; 2809 struct spdk_nvme_ctrlr ctrlr = {}; 2810 struct nvme_ctrlr *nvme_ctrlr = NULL; 2811 struct spdk_io_channel *ch; 2812 struct nvme_ctrlr_channel *ctrlr_ch; 2813 struct spdk_nvme_qpair *qpair; 2814 int rc; 2815 2816 ut_init_trid(&trid); 2817 TAILQ_INIT(&ctrlr.active_io_qpairs); 2818 2819 set_thread(0); 2820 2821 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2822 CU_ASSERT(rc == 0); 2823 2824 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2825 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2826 2827 ch = spdk_get_io_channel(nvme_ctrlr); 2828 SPDK_CU_ASSERT_FATAL(ch != NULL); 2829 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2830 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2831 2832 qpair = bdev_nvme_get_io_qpair(ch); 2833 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2834 2835 spdk_put_io_channel(ch); 2836 2837 rc = bdev_nvme_delete("nvme0", &g_any_path); 2838 CU_ASSERT(rc == 0); 2839 2840 poll_threads(); 2841 spdk_delay_us(1000); 2842 poll_threads(); 2843 2844 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2845 } 2846 2847 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2848 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2849 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2850 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2851 */ 2852 static void 2853 test_bdev_unregister(void) 2854 { 2855 struct spdk_nvme_transport_id trid = {}; 2856 struct spdk_nvme_ctrlr *ctrlr; 2857 struct nvme_ctrlr *nvme_ctrlr; 2858 struct nvme_ns *nvme_ns1, *nvme_ns2; 2859 const int STRING_SIZE = 32; 2860 const char *attached_names[STRING_SIZE]; 2861 struct nvme_bdev *bdev1, *bdev2; 2862 int rc; 2863 2864 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2865 ut_init_trid(&trid); 2866 2867 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2868 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2869 2870 g_ut_attach_ctrlr_status = 0; 2871 g_ut_attach_bdev_count = 2; 2872 2873 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2874 attach_ctrlr_done, NULL, NULL, NULL, false); 2875 CU_ASSERT(rc == 0); 2876 2877 spdk_delay_us(1000); 2878 poll_threads(); 2879 2880 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2881 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2882 2883 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2884 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2885 2886 bdev1 = nvme_ns1->bdev; 2887 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2888 2889 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2890 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2891 2892 bdev2 = nvme_ns2->bdev; 2893 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2894 2895 bdev_nvme_destruct(&bdev1->disk); 2896 bdev_nvme_destruct(&bdev2->disk); 2897 2898 poll_threads(); 2899 2900 CU_ASSERT(nvme_ns1->bdev == NULL); 2901 CU_ASSERT(nvme_ns2->bdev == NULL); 2902 2903 nvme_ctrlr->destruct = true; 2904 _nvme_ctrlr_destruct(nvme_ctrlr); 2905 2906 poll_threads(); 2907 spdk_delay_us(1000); 2908 poll_threads(); 2909 2910 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2911 } 2912 2913 static void 2914 test_compare_ns(void) 2915 { 2916 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 2917 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 2918 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 2919 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 2920 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 2921 2922 /* No IDs are defined. */ 2923 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2924 2925 /* Only EUI64 are defined and not matched. */ 2926 nsdata1.eui64 = 0xABCDEF0123456789; 2927 nsdata2.eui64 = 0xBBCDEF0123456789; 2928 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2929 2930 /* Only EUI64 are defined and matched. */ 2931 nsdata2.eui64 = 0xABCDEF0123456789; 2932 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2933 2934 /* Only NGUID are defined and not matched. */ 2935 nsdata1.eui64 = 0x0; 2936 nsdata2.eui64 = 0x0; 2937 nsdata1.nguid[0] = 0x12; 2938 nsdata2.nguid[0] = 0x10; 2939 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2940 2941 /* Only NGUID are defined and matched. */ 2942 nsdata2.nguid[0] = 0x12; 2943 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2944 2945 /* Only UUID are defined and not matched. */ 2946 nsdata1.nguid[0] = 0x0; 2947 nsdata2.nguid[0] = 0x0; 2948 ns1.uuid = &uuid1; 2949 ns2.uuid = &uuid2; 2950 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2951 2952 /* Only one UUID is defined. */ 2953 ns1.uuid = NULL; 2954 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2955 2956 /* Only UUID are defined and matched. */ 2957 ns1.uuid = &uuid2; 2958 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2959 2960 /* All EUI64, NGUID, and UUID are defined and matched. */ 2961 nsdata1.eui64 = 0x123456789ABCDEF; 2962 nsdata2.eui64 = 0x123456789ABCDEF; 2963 nsdata1.nguid[15] = 0x34; 2964 nsdata2.nguid[15] = 0x34; 2965 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2966 2967 /* CSI are not matched. */ 2968 ns1.csi = SPDK_NVME_CSI_ZNS; 2969 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2970 } 2971 2972 static void 2973 test_init_ana_log_page(void) 2974 { 2975 struct spdk_nvme_transport_id trid = {}; 2976 struct spdk_nvme_ctrlr *ctrlr; 2977 struct nvme_ctrlr *nvme_ctrlr; 2978 const int STRING_SIZE = 32; 2979 const char *attached_names[STRING_SIZE]; 2980 int rc; 2981 2982 set_thread(0); 2983 2984 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2985 ut_init_trid(&trid); 2986 2987 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 2988 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2989 2990 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2991 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2992 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2993 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 2994 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2995 2996 g_ut_attach_ctrlr_status = 0; 2997 g_ut_attach_bdev_count = 5; 2998 2999 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3000 attach_ctrlr_done, NULL, NULL, NULL, false); 3001 CU_ASSERT(rc == 0); 3002 3003 spdk_delay_us(1000); 3004 poll_threads(); 3005 3006 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3007 poll_threads(); 3008 3009 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3010 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3011 3012 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 3013 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 3014 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 3015 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 3016 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 3017 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 3018 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 3019 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 3020 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 3021 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 3022 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 3023 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3024 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3025 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3026 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3027 3028 rc = bdev_nvme_delete("nvme0", &g_any_path); 3029 CU_ASSERT(rc == 0); 3030 3031 poll_threads(); 3032 spdk_delay_us(1000); 3033 poll_threads(); 3034 3035 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3036 } 3037 3038 static void 3039 init_accel(void) 3040 { 3041 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3042 sizeof(int), "accel_p"); 3043 } 3044 3045 static void 3046 fini_accel(void) 3047 { 3048 spdk_io_device_unregister(g_accel_p, NULL); 3049 } 3050 3051 static void 3052 test_get_memory_domains(void) 3053 { 3054 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3055 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3056 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3057 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3058 struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3059 struct spdk_memory_domain *domains[4] = {}; 3060 int rc = 0; 3061 3062 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3063 3064 /* nvme controller doesn't have memory domains */ 3065 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3066 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3067 CU_ASSERT(rc == 0); 3068 CU_ASSERT(domains[0] == NULL); 3069 CU_ASSERT(domains[1] == NULL); 3070 3071 /* nvme controller has a memory domain */ 3072 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3073 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3074 CU_ASSERT(rc == 1); 3075 CU_ASSERT(domains[0] != NULL); 3076 memset(domains, 0, sizeof(domains)); 3077 3078 /* multipath, 2 controllers report 1 memory domain each */ 3079 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3080 3081 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3082 CU_ASSERT(rc == 2); 3083 CU_ASSERT(domains[0] != NULL); 3084 CU_ASSERT(domains[1] != NULL); 3085 memset(domains, 0, sizeof(domains)); 3086 3087 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3088 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3089 CU_ASSERT(rc == 2); 3090 3091 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3092 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3093 CU_ASSERT(rc == 2); 3094 CU_ASSERT(domains[0] == NULL); 3095 CU_ASSERT(domains[1] == NULL); 3096 3097 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3098 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3099 CU_ASSERT(rc == 2); 3100 CU_ASSERT(domains[0] != NULL); 3101 CU_ASSERT(domains[1] == NULL); 3102 memset(domains, 0, sizeof(domains)); 3103 3104 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) */ 3105 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3106 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3107 CU_ASSERT(rc == 4); 3108 CU_ASSERT(domains[0] != NULL); 3109 CU_ASSERT(domains[1] != NULL); 3110 CU_ASSERT(domains[2] != NULL); 3111 CU_ASSERT(domains[3] != NULL); 3112 memset(domains, 0, sizeof(domains)); 3113 3114 /* multipath, 2 controllers report 2 memory domain each (not possible, just for test) 3115 * Array size is less than the number of memory domains */ 3116 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3117 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3118 CU_ASSERT(rc == 4); 3119 CU_ASSERT(domains[0] != NULL); 3120 CU_ASSERT(domains[1] != NULL); 3121 CU_ASSERT(domains[2] != NULL); 3122 CU_ASSERT(domains[3] == NULL); 3123 memset(domains, 0, sizeof(domains)); 3124 3125 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3126 } 3127 3128 static void 3129 test_reconnect_qpair(void) 3130 { 3131 struct spdk_nvme_transport_id trid = {}; 3132 struct spdk_nvme_ctrlr *ctrlr; 3133 struct nvme_ctrlr *nvme_ctrlr; 3134 const int STRING_SIZE = 32; 3135 const char *attached_names[STRING_SIZE]; 3136 struct nvme_bdev *bdev; 3137 struct spdk_io_channel *ch1, *ch2; 3138 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3139 struct nvme_io_path *io_path1, *io_path2; 3140 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3141 int rc; 3142 3143 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3144 ut_init_trid(&trid); 3145 3146 set_thread(0); 3147 3148 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3149 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3150 3151 g_ut_attach_ctrlr_status = 0; 3152 g_ut_attach_bdev_count = 1; 3153 3154 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3155 attach_ctrlr_done, NULL, NULL, NULL, false); 3156 CU_ASSERT(rc == 0); 3157 3158 spdk_delay_us(1000); 3159 poll_threads(); 3160 3161 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3162 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3163 3164 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3165 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3166 3167 ch1 = spdk_get_io_channel(bdev); 3168 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3169 3170 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3171 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3172 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3173 nvme_qpair1 = io_path1->qpair; 3174 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3175 3176 set_thread(1); 3177 3178 ch2 = spdk_get_io_channel(bdev); 3179 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3180 3181 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3182 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3183 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3184 nvme_qpair2 = io_path2->qpair; 3185 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3186 3187 /* If a qpair is disconnected, it is freed and then reconnected via 3188 * resetting the corresponding nvme_ctrlr. 3189 */ 3190 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3191 ctrlr->is_failed = true; 3192 3193 poll_thread_times(1, 3); 3194 CU_ASSERT(nvme_qpair1->qpair != NULL); 3195 CU_ASSERT(nvme_qpair2->qpair == NULL); 3196 CU_ASSERT(nvme_ctrlr->resetting == true); 3197 3198 poll_thread_times(0, 3); 3199 CU_ASSERT(nvme_qpair1->qpair == NULL); 3200 CU_ASSERT(nvme_qpair2->qpair == NULL); 3201 CU_ASSERT(ctrlr->is_failed == true); 3202 3203 poll_thread_times(1, 2); 3204 poll_thread_times(0, 1); 3205 CU_ASSERT(ctrlr->is_failed == false); 3206 CU_ASSERT(ctrlr->adminq.is_connected == false); 3207 3208 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3209 poll_thread_times(0, 2); 3210 CU_ASSERT(ctrlr->adminq.is_connected == true); 3211 3212 poll_thread_times(0, 1); 3213 poll_thread_times(1, 1); 3214 CU_ASSERT(nvme_qpair1->qpair != NULL); 3215 CU_ASSERT(nvme_qpair2->qpair != NULL); 3216 CU_ASSERT(nvme_ctrlr->resetting == true); 3217 3218 poll_thread_times(0, 2); 3219 poll_thread_times(1, 1); 3220 poll_thread_times(0, 1); 3221 CU_ASSERT(nvme_ctrlr->resetting == false); 3222 3223 poll_threads(); 3224 3225 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3226 * fails, the qpair is just freed. 3227 */ 3228 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3229 ctrlr->is_failed = true; 3230 ctrlr->fail_reset = true; 3231 3232 poll_thread_times(1, 3); 3233 CU_ASSERT(nvme_qpair1->qpair != NULL); 3234 CU_ASSERT(nvme_qpair2->qpair == NULL); 3235 CU_ASSERT(nvme_ctrlr->resetting == true); 3236 3237 poll_thread_times(0, 3); 3238 poll_thread_times(1, 1); 3239 CU_ASSERT(nvme_qpair1->qpair == NULL); 3240 CU_ASSERT(nvme_qpair2->qpair == NULL); 3241 CU_ASSERT(ctrlr->is_failed == true); 3242 3243 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3244 poll_thread_times(0, 3); 3245 poll_thread_times(1, 1); 3246 poll_thread_times(0, 1); 3247 CU_ASSERT(ctrlr->is_failed == true); 3248 CU_ASSERT(nvme_ctrlr->resetting == false); 3249 CU_ASSERT(nvme_qpair1->qpair == NULL); 3250 CU_ASSERT(nvme_qpair2->qpair == NULL); 3251 3252 poll_threads(); 3253 3254 spdk_put_io_channel(ch2); 3255 3256 set_thread(0); 3257 3258 spdk_put_io_channel(ch1); 3259 3260 poll_threads(); 3261 3262 rc = bdev_nvme_delete("nvme0", &g_any_path); 3263 CU_ASSERT(rc == 0); 3264 3265 poll_threads(); 3266 spdk_delay_us(1000); 3267 poll_threads(); 3268 3269 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3270 } 3271 3272 static void 3273 test_create_bdev_ctrlr(void) 3274 { 3275 struct nvme_path_id path1 = {}, path2 = {}; 3276 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3277 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3278 const int STRING_SIZE = 32; 3279 const char *attached_names[STRING_SIZE]; 3280 int rc; 3281 3282 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3283 ut_init_trid(&path1.trid); 3284 ut_init_trid2(&path2.trid); 3285 3286 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3287 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3288 3289 g_ut_attach_ctrlr_status = 0; 3290 g_ut_attach_bdev_count = 0; 3291 3292 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3293 attach_ctrlr_done, NULL, NULL, NULL, true); 3294 CU_ASSERT(rc == 0); 3295 3296 spdk_delay_us(1000); 3297 poll_threads(); 3298 3299 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3300 poll_threads(); 3301 3302 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3303 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3304 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3305 3306 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3307 g_ut_attach_ctrlr_status = -EINVAL; 3308 3309 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3310 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3311 3312 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3313 3314 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3315 attach_ctrlr_done, NULL, NULL, NULL, true); 3316 CU_ASSERT(rc == 0); 3317 3318 spdk_delay_us(1000); 3319 poll_threads(); 3320 3321 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3322 poll_threads(); 3323 3324 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3325 3326 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 3327 g_ut_attach_ctrlr_status = 0; 3328 3329 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3330 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3331 3332 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3333 attach_ctrlr_done, NULL, NULL, NULL, true); 3334 CU_ASSERT(rc == 0); 3335 3336 spdk_delay_us(1000); 3337 poll_threads(); 3338 3339 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3340 poll_threads(); 3341 3342 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3343 3344 /* Delete two ctrlrs at once. */ 3345 rc = bdev_nvme_delete("nvme0", &g_any_path); 3346 CU_ASSERT(rc == 0); 3347 3348 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3349 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3350 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3351 3352 poll_threads(); 3353 spdk_delay_us(1000); 3354 poll_threads(); 3355 3356 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3357 3358 /* Add two ctrlrs and delete one by one. */ 3359 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3360 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3361 3362 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3363 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3364 3365 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3366 attach_ctrlr_done, NULL, NULL, NULL, true); 3367 CU_ASSERT(rc == 0); 3368 3369 spdk_delay_us(1000); 3370 poll_threads(); 3371 3372 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3373 poll_threads(); 3374 3375 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3376 attach_ctrlr_done, NULL, NULL, NULL, true); 3377 CU_ASSERT(rc == 0); 3378 3379 spdk_delay_us(1000); 3380 poll_threads(); 3381 3382 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3383 poll_threads(); 3384 3385 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3386 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3387 3388 rc = bdev_nvme_delete("nvme0", &path1); 3389 CU_ASSERT(rc == 0); 3390 3391 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3392 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3393 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3394 3395 poll_threads(); 3396 spdk_delay_us(1000); 3397 poll_threads(); 3398 3399 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3400 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3401 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3402 3403 rc = bdev_nvme_delete("nvme0", &path2); 3404 CU_ASSERT(rc == 0); 3405 3406 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3407 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3408 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3409 3410 poll_threads(); 3411 spdk_delay_us(1000); 3412 poll_threads(); 3413 3414 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3415 } 3416 3417 static struct nvme_ns * 3418 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3419 { 3420 struct nvme_ns *nvme_ns; 3421 3422 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3423 if (nvme_ns->ctrlr == nvme_ctrlr) { 3424 return nvme_ns; 3425 } 3426 } 3427 3428 return NULL; 3429 } 3430 3431 static void 3432 test_add_multi_ns_to_bdev(void) 3433 { 3434 struct nvme_path_id path1 = {}, path2 = {}; 3435 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3436 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3437 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3438 struct nvme_ns *nvme_ns1, *nvme_ns2; 3439 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3440 const int STRING_SIZE = 32; 3441 const char *attached_names[STRING_SIZE]; 3442 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3443 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3444 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3445 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3446 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3447 int rc; 3448 3449 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3450 ut_init_trid(&path1.trid); 3451 ut_init_trid2(&path2.trid); 3452 3453 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3454 3455 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3456 * namespaces are populated. 3457 */ 3458 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3459 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3460 3461 ctrlr1->ns[1].is_active = false; 3462 ctrlr1->ns[4].is_active = false; 3463 ctrlr1->ns[0].uuid = &uuid1; 3464 ctrlr1->ns[2].uuid = &uuid3; 3465 ctrlr1->ns[3].uuid = &uuid4; 3466 3467 g_ut_attach_ctrlr_status = 0; 3468 g_ut_attach_bdev_count = 3; 3469 3470 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3471 attach_ctrlr_done, NULL, NULL, NULL, true); 3472 CU_ASSERT(rc == 0); 3473 3474 spdk_delay_us(1000); 3475 poll_threads(); 3476 3477 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3478 poll_threads(); 3479 3480 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3481 * namespaces are populated. The uuid of 4th namespace is different, and hence 3482 * adding 4th namespace to a bdev should fail. 3483 */ 3484 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3485 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3486 3487 ctrlr2->ns[2].is_active = false; 3488 ctrlr2->ns[4].is_active = false; 3489 ctrlr2->ns[0].uuid = &uuid1; 3490 ctrlr2->ns[1].uuid = &uuid2; 3491 ctrlr2->ns[3].uuid = &uuid44; 3492 3493 g_ut_attach_ctrlr_status = 0; 3494 g_ut_attach_bdev_count = 2; 3495 3496 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3497 attach_ctrlr_done, NULL, NULL, NULL, true); 3498 CU_ASSERT(rc == 0); 3499 3500 spdk_delay_us(1000); 3501 poll_threads(); 3502 3503 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3504 poll_threads(); 3505 3506 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3507 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3508 3509 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3510 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3511 3512 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3513 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3514 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3515 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3516 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3517 3518 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3519 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3520 3521 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3522 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3523 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3524 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3525 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3526 3527 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3528 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3529 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3530 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3531 bdev3 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3532 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3533 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3534 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3535 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3536 3537 CU_ASSERT(bdev1->ref == 2); 3538 CU_ASSERT(bdev2->ref == 1); 3539 CU_ASSERT(bdev3->ref == 1); 3540 CU_ASSERT(bdev4->ref == 1); 3541 3542 /* Test if nvme_bdevs can be deleted by deleting ctrlr one by one. */ 3543 rc = bdev_nvme_delete("nvme0", &path1); 3544 CU_ASSERT(rc == 0); 3545 3546 poll_threads(); 3547 spdk_delay_us(1000); 3548 poll_threads(); 3549 3550 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3551 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3552 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3553 3554 rc = bdev_nvme_delete("nvme0", &path2); 3555 CU_ASSERT(rc == 0); 3556 3557 poll_threads(); 3558 spdk_delay_us(1000); 3559 poll_threads(); 3560 3561 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3562 3563 /* Test if a nvme_bdev which has a shared namespace between two ctrlrs 3564 * can be deleted when the bdev subsystem shutdown. 3565 */ 3566 g_ut_attach_bdev_count = 1; 3567 3568 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3569 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3570 3571 ctrlr1->ns[0].uuid = &uuid1; 3572 3573 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3574 attach_ctrlr_done, NULL, NULL, NULL, true); 3575 CU_ASSERT(rc == 0); 3576 3577 spdk_delay_us(1000); 3578 poll_threads(); 3579 3580 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3581 poll_threads(); 3582 3583 ut_init_trid2(&path2.trid); 3584 3585 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3586 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3587 3588 ctrlr2->ns[0].uuid = &uuid1; 3589 3590 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3591 attach_ctrlr_done, NULL, NULL, NULL, true); 3592 CU_ASSERT(rc == 0); 3593 3594 spdk_delay_us(1000); 3595 poll_threads(); 3596 3597 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3598 poll_threads(); 3599 3600 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3601 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3602 3603 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3604 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3605 3606 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3607 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3608 3609 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3610 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3611 3612 /* Check if a nvme_bdev has two nvme_ns. */ 3613 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3614 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3615 CU_ASSERT(nvme_ns1->bdev == bdev1); 3616 3617 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3618 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3619 CU_ASSERT(nvme_ns2->bdev == bdev1); 3620 3621 /* Delete nvme_bdev first when the bdev subsystem shutdown. */ 3622 bdev_nvme_destruct(&bdev1->disk); 3623 3624 poll_threads(); 3625 3626 CU_ASSERT(nvme_ns1->bdev == NULL); 3627 CU_ASSERT(nvme_ns2->bdev == NULL); 3628 3629 nvme_ctrlr1->destruct = true; 3630 _nvme_ctrlr_destruct(nvme_ctrlr1); 3631 3632 poll_threads(); 3633 spdk_delay_us(1000); 3634 poll_threads(); 3635 3636 nvme_ctrlr2->destruct = true; 3637 _nvme_ctrlr_destruct(nvme_ctrlr2); 3638 3639 poll_threads(); 3640 spdk_delay_us(1000); 3641 poll_threads(); 3642 3643 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3644 } 3645 3646 static void 3647 test_add_multi_io_paths_to_nbdev_ch(void) 3648 { 3649 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3650 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3651 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3652 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3653 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3654 const int STRING_SIZE = 32; 3655 const char *attached_names[STRING_SIZE]; 3656 struct nvme_bdev *bdev; 3657 struct spdk_io_channel *ch; 3658 struct nvme_bdev_channel *nbdev_ch; 3659 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3660 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3661 int rc; 3662 3663 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3664 ut_init_trid(&path1.trid); 3665 ut_init_trid2(&path2.trid); 3666 ut_init_trid3(&path3.trid); 3667 g_ut_attach_ctrlr_status = 0; 3668 g_ut_attach_bdev_count = 1; 3669 3670 set_thread(1); 3671 3672 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3673 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3674 3675 ctrlr1->ns[0].uuid = &uuid1; 3676 3677 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3678 attach_ctrlr_done, NULL, NULL, NULL, true); 3679 CU_ASSERT(rc == 0); 3680 3681 spdk_delay_us(1000); 3682 poll_threads(); 3683 3684 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3685 poll_threads(); 3686 3687 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3688 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3689 3690 ctrlr2->ns[0].uuid = &uuid1; 3691 3692 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3693 attach_ctrlr_done, NULL, NULL, NULL, true); 3694 CU_ASSERT(rc == 0); 3695 3696 spdk_delay_us(1000); 3697 poll_threads(); 3698 3699 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3700 poll_threads(); 3701 3702 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3703 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3704 3705 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3706 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3707 3708 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3709 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3710 3711 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3712 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3713 3714 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3715 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3716 3717 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3718 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3719 3720 set_thread(0); 3721 3722 ch = spdk_get_io_channel(bdev); 3723 SPDK_CU_ASSERT_FATAL(ch != NULL); 3724 nbdev_ch = spdk_io_channel_get_ctx(ch); 3725 3726 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3727 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3728 3729 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3730 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3731 3732 set_thread(1); 3733 3734 /* Check if I/O path is dynamically added to nvme_bdev_channel. */ 3735 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 3736 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 3737 3738 ctrlr3->ns[0].uuid = &uuid1; 3739 3740 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 3741 attach_ctrlr_done, NULL, NULL, NULL, true); 3742 CU_ASSERT(rc == 0); 3743 3744 spdk_delay_us(1000); 3745 poll_threads(); 3746 3747 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3748 poll_threads(); 3749 3750 nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid); 3751 SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL); 3752 3753 nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3); 3754 SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL); 3755 3756 io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3); 3757 SPDK_CU_ASSERT_FATAL(io_path3 != NULL); 3758 3759 /* Check if I/O path is dynamically deleted from nvme_bdev_channel. */ 3760 rc = bdev_nvme_delete("nvme0", &path2); 3761 CU_ASSERT(rc == 0); 3762 3763 poll_threads(); 3764 spdk_delay_us(1000); 3765 poll_threads(); 3766 3767 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1); 3768 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3769 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3); 3770 3771 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1); 3772 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL); 3773 CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3); 3774 3775 set_thread(0); 3776 3777 spdk_put_io_channel(ch); 3778 3779 poll_threads(); 3780 3781 set_thread(1); 3782 3783 rc = bdev_nvme_delete("nvme0", &g_any_path); 3784 CU_ASSERT(rc == 0); 3785 3786 poll_threads(); 3787 spdk_delay_us(1000); 3788 poll_threads(); 3789 3790 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3791 } 3792 3793 static void 3794 test_admin_path(void) 3795 { 3796 struct nvme_path_id path1 = {}, path2 = {}; 3797 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3798 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3799 const int STRING_SIZE = 32; 3800 const char *attached_names[STRING_SIZE]; 3801 struct nvme_bdev *bdev; 3802 struct spdk_io_channel *ch; 3803 struct spdk_bdev_io *bdev_io; 3804 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3805 int rc; 3806 3807 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3808 ut_init_trid(&path1.trid); 3809 ut_init_trid2(&path2.trid); 3810 g_ut_attach_ctrlr_status = 0; 3811 g_ut_attach_bdev_count = 1; 3812 3813 set_thread(0); 3814 3815 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3816 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3817 3818 ctrlr1->ns[0].uuid = &uuid1; 3819 3820 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3821 attach_ctrlr_done, NULL, NULL, NULL, true); 3822 CU_ASSERT(rc == 0); 3823 3824 spdk_delay_us(1000); 3825 poll_threads(); 3826 3827 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3828 poll_threads(); 3829 3830 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3831 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3832 3833 ctrlr2->ns[0].uuid = &uuid1; 3834 3835 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3836 attach_ctrlr_done, NULL, NULL, NULL, true); 3837 CU_ASSERT(rc == 0); 3838 3839 spdk_delay_us(1000); 3840 poll_threads(); 3841 3842 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3843 poll_threads(); 3844 3845 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3846 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3847 3848 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3849 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3850 3851 ch = spdk_get_io_channel(bdev); 3852 SPDK_CU_ASSERT_FATAL(ch != NULL); 3853 3854 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch); 3855 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 3856 3857 /* ctrlr1 is failed but ctrlr2 is not failed. admin command is 3858 * submitted to ctrlr2. 3859 */ 3860 ctrlr1->is_failed = true; 3861 bdev_io->internal.in_submit_request = true; 3862 3863 bdev_nvme_submit_request(ch, bdev_io); 3864 3865 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3866 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1); 3867 CU_ASSERT(bdev_io->internal.in_submit_request == true); 3868 3869 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3870 poll_threads(); 3871 3872 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3873 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3874 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 3875 3876 /* both ctrlr1 and ctrlr2 are failed. admin command is failed to submit. */ 3877 ctrlr2->is_failed = true; 3878 bdev_io->internal.in_submit_request = true; 3879 3880 bdev_nvme_submit_request(ch, bdev_io); 3881 3882 CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0); 3883 CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0); 3884 CU_ASSERT(bdev_io->internal.in_submit_request == false); 3885 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 3886 3887 free(bdev_io); 3888 3889 spdk_put_io_channel(ch); 3890 3891 poll_threads(); 3892 3893 rc = bdev_nvme_delete("nvme0", &g_any_path); 3894 CU_ASSERT(rc == 0); 3895 3896 poll_threads(); 3897 spdk_delay_us(1000); 3898 poll_threads(); 3899 3900 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3901 } 3902 3903 static struct nvme_io_path * 3904 ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch, 3905 struct nvme_ctrlr *nvme_ctrlr) 3906 { 3907 struct nvme_io_path *io_path; 3908 3909 STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) { 3910 if (io_path->qpair->ctrlr == nvme_ctrlr) { 3911 return io_path; 3912 } 3913 } 3914 3915 return NULL; 3916 } 3917 3918 static void 3919 test_reset_bdev_ctrlr(void) 3920 { 3921 struct nvme_path_id path1 = {}, path2 = {}; 3922 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3923 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3924 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3925 struct nvme_path_id *curr_path1, *curr_path2; 3926 const int STRING_SIZE = 32; 3927 const char *attached_names[STRING_SIZE]; 3928 struct nvme_bdev *bdev; 3929 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 3930 struct nvme_bdev_io *first_bio; 3931 struct spdk_io_channel *ch1, *ch2; 3932 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3933 struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22; 3934 int rc; 3935 3936 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3937 ut_init_trid(&path1.trid); 3938 ut_init_trid2(&path2.trid); 3939 g_ut_attach_ctrlr_status = 0; 3940 g_ut_attach_bdev_count = 1; 3941 3942 set_thread(0); 3943 3944 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3945 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3946 3947 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3948 attach_ctrlr_done, NULL, NULL, NULL, true); 3949 CU_ASSERT(rc == 0); 3950 3951 spdk_delay_us(1000); 3952 poll_threads(); 3953 3954 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3955 poll_threads(); 3956 3957 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3958 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3959 3960 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3961 attach_ctrlr_done, NULL, NULL, NULL, true); 3962 CU_ASSERT(rc == 0); 3963 3964 spdk_delay_us(1000); 3965 poll_threads(); 3966 3967 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3968 poll_threads(); 3969 3970 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3971 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3972 3973 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3974 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3975 3976 curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids); 3977 SPDK_CU_ASSERT_FATAL(curr_path1 != NULL); 3978 3979 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3980 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3981 3982 curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids); 3983 SPDK_CU_ASSERT_FATAL(curr_path2 != NULL); 3984 3985 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3986 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3987 3988 set_thread(0); 3989 3990 ch1 = spdk_get_io_channel(bdev); 3991 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3992 3993 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3994 io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1); 3995 SPDK_CU_ASSERT_FATAL(io_path11 != NULL); 3996 io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2); 3997 SPDK_CU_ASSERT_FATAL(io_path12 != NULL); 3998 3999 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 4000 first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx; 4001 4002 set_thread(1); 4003 4004 ch2 = spdk_get_io_channel(bdev); 4005 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 4006 4007 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 4008 io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1); 4009 SPDK_CU_ASSERT_FATAL(io_path21 != NULL); 4010 io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2); 4011 SPDK_CU_ASSERT_FATAL(io_path22 != NULL); 4012 4013 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 4014 4015 /* The first reset request from bdev_io is submitted on thread 0. 4016 * Check if ctrlr1 is reset and then ctrlr2 is reset. 4017 * 4018 * A few extra polls are necessary after resetting ctrlr1 to check 4019 * pending reset requests for ctrlr1. 4020 */ 4021 ctrlr1->is_failed = true; 4022 curr_path1->is_failed = true; 4023 ctrlr2->is_failed = true; 4024 curr_path2->is_failed = true; 4025 4026 set_thread(0); 4027 4028 bdev_nvme_submit_request(ch1, first_bdev_io); 4029 CU_ASSERT(first_bio->io_path == io_path11); 4030 CU_ASSERT(nvme_ctrlr1->resetting == true); 4031 CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio); 4032 4033 poll_thread_times(0, 3); 4034 CU_ASSERT(io_path11->qpair->qpair == NULL); 4035 CU_ASSERT(io_path21->qpair->qpair != NULL); 4036 4037 poll_thread_times(1, 2); 4038 CU_ASSERT(io_path11->qpair->qpair == NULL); 4039 CU_ASSERT(io_path21->qpair->qpair == NULL); 4040 CU_ASSERT(ctrlr1->is_failed == true); 4041 4042 poll_thread_times(0, 1); 4043 CU_ASSERT(nvme_ctrlr1->resetting == true); 4044 CU_ASSERT(ctrlr1->is_failed == false); 4045 CU_ASSERT(ctrlr1->adminq.is_connected == false); 4046 CU_ASSERT(curr_path1->is_failed == true); 4047 4048 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4049 poll_thread_times(0, 2); 4050 CU_ASSERT(ctrlr1->adminq.is_connected == true); 4051 4052 poll_thread_times(0, 1); 4053 CU_ASSERT(io_path11->qpair->qpair != NULL); 4054 CU_ASSERT(io_path21->qpair->qpair == NULL); 4055 4056 poll_thread_times(1, 1); 4057 CU_ASSERT(io_path11->qpair->qpair != NULL); 4058 CU_ASSERT(io_path21->qpair->qpair != NULL); 4059 4060 poll_thread_times(0, 2); 4061 CU_ASSERT(nvme_ctrlr1->resetting == true); 4062 poll_thread_times(1, 1); 4063 CU_ASSERT(nvme_ctrlr1->resetting == true); 4064 poll_thread_times(0, 2); 4065 CU_ASSERT(nvme_ctrlr1->resetting == false); 4066 CU_ASSERT(curr_path1->is_failed == false); 4067 CU_ASSERT(first_bio->io_path == io_path12); 4068 CU_ASSERT(nvme_ctrlr2->resetting == true); 4069 4070 poll_thread_times(0, 3); 4071 CU_ASSERT(io_path12->qpair->qpair == NULL); 4072 CU_ASSERT(io_path22->qpair->qpair != NULL); 4073 4074 poll_thread_times(1, 2); 4075 CU_ASSERT(io_path12->qpair->qpair == NULL); 4076 CU_ASSERT(io_path22->qpair->qpair == NULL); 4077 CU_ASSERT(ctrlr2->is_failed == true); 4078 4079 poll_thread_times(0, 1); 4080 CU_ASSERT(nvme_ctrlr2->resetting == true); 4081 CU_ASSERT(ctrlr2->is_failed == false); 4082 CU_ASSERT(ctrlr2->adminq.is_connected == false); 4083 CU_ASSERT(curr_path2->is_failed == true); 4084 4085 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4086 poll_thread_times(0, 2); 4087 CU_ASSERT(ctrlr2->adminq.is_connected == true); 4088 4089 poll_thread_times(0, 1); 4090 CU_ASSERT(io_path12->qpair->qpair != NULL); 4091 CU_ASSERT(io_path22->qpair->qpair == NULL); 4092 4093 poll_thread_times(1, 2); 4094 CU_ASSERT(io_path12->qpair->qpair != NULL); 4095 CU_ASSERT(io_path22->qpair->qpair != NULL); 4096 4097 poll_thread_times(0, 2); 4098 CU_ASSERT(nvme_ctrlr2->resetting == true); 4099 poll_thread_times(1, 1); 4100 CU_ASSERT(nvme_ctrlr2->resetting == true); 4101 poll_thread_times(0, 2); 4102 CU_ASSERT(first_bio->io_path == NULL); 4103 CU_ASSERT(nvme_ctrlr2->resetting == false); 4104 CU_ASSERT(curr_path2->is_failed == false); 4105 4106 poll_threads(); 4107 4108 /* There is a race between two reset requests from bdev_io. 4109 * 4110 * The first reset request is submitted on thread 0, and the second reset 4111 * request is submitted on thread 1 while the first is resetting ctrlr1. 4112 * The second is pending on ctrlr1. After the first completes resetting ctrlr1, 4113 * both reset requests go to ctrlr2. The first comes earlier than the second. 4114 * The second is pending on ctrlr2 again. After the first completes resetting 4115 * ctrl2, both complete successfully. 4116 */ 4117 ctrlr1->is_failed = true; 4118 curr_path1->is_failed = true; 4119 ctrlr2->is_failed = true; 4120 curr_path2->is_failed = true; 4121 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4122 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 4123 4124 set_thread(0); 4125 4126 bdev_nvme_submit_request(ch1, first_bdev_io); 4127 4128 set_thread(1); 4129 4130 bdev_nvme_submit_request(ch2, second_bdev_io); 4131 4132 CU_ASSERT(nvme_ctrlr1->resetting == true); 4133 CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio); 4134 CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io); 4135 4136 poll_threads(); 4137 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4138 poll_threads(); 4139 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4140 poll_threads(); 4141 4142 CU_ASSERT(ctrlr1->is_failed == false); 4143 CU_ASSERT(curr_path1->is_failed == false); 4144 CU_ASSERT(ctrlr2->is_failed == false); 4145 CU_ASSERT(curr_path2->is_failed == false); 4146 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4147 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4148 4149 set_thread(0); 4150 4151 spdk_put_io_channel(ch1); 4152 4153 set_thread(1); 4154 4155 spdk_put_io_channel(ch2); 4156 4157 poll_threads(); 4158 4159 set_thread(0); 4160 4161 rc = bdev_nvme_delete("nvme0", &g_any_path); 4162 CU_ASSERT(rc == 0); 4163 4164 poll_threads(); 4165 spdk_delay_us(1000); 4166 poll_threads(); 4167 4168 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4169 4170 free(first_bdev_io); 4171 free(second_bdev_io); 4172 } 4173 4174 static void 4175 test_find_io_path(void) 4176 { 4177 struct nvme_bdev_channel nbdev_ch = { 4178 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 4179 }; 4180 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}; 4181 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}; 4182 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 4183 struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {}; 4184 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, }; 4185 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, }; 4186 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 4187 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 4188 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 4189 4190 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 4191 4192 /* Test if io_path whose ANA state is not accessible is excluded. */ 4193 4194 nvme_qpair1.qpair = &qpair1; 4195 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4196 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4197 4198 nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 4199 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4200 4201 nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 4202 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4203 4204 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4205 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4206 4207 nbdev_ch.current_io_path = NULL; 4208 4209 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4210 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4211 4212 nbdev_ch.current_io_path = NULL; 4213 4214 /* Test if io_path whose qpair is resetting is excluded. */ 4215 4216 nvme_qpair1.qpair = NULL; 4217 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL); 4218 4219 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 4220 4221 /* Test if ANA optimized state or the first found ANA non-optimized state 4222 * is prioritized. 4223 */ 4224 4225 nvme_qpair1.qpair = &qpair1; 4226 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4227 nvme_qpair2.qpair = &qpair2; 4228 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4229 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 4230 4231 nbdev_ch.current_io_path = NULL; 4232 4233 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 4234 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 4235 4236 nbdev_ch.current_io_path = NULL; 4237 } 4238 4239 static void 4240 test_retry_io_if_ana_state_is_updating(void) 4241 { 4242 struct nvme_path_id path = {}; 4243 struct nvme_ctrlr_opts opts = {}; 4244 struct spdk_nvme_ctrlr *ctrlr; 4245 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4246 struct nvme_ctrlr *nvme_ctrlr; 4247 const int STRING_SIZE = 32; 4248 const char *attached_names[STRING_SIZE]; 4249 struct nvme_bdev *bdev; 4250 struct nvme_ns *nvme_ns; 4251 struct spdk_bdev_io *bdev_io1; 4252 struct spdk_io_channel *ch; 4253 struct nvme_bdev_channel *nbdev_ch; 4254 struct nvme_io_path *io_path; 4255 struct nvme_qpair *nvme_qpair; 4256 int rc; 4257 4258 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4259 ut_init_trid(&path.trid); 4260 4261 set_thread(0); 4262 4263 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4264 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4265 4266 g_ut_attach_ctrlr_status = 0; 4267 g_ut_attach_bdev_count = 1; 4268 4269 opts.ctrlr_loss_timeout_sec = -1; 4270 opts.reconnect_delay_sec = 1; 4271 4272 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4273 attach_ctrlr_done, NULL, NULL, &opts, false); 4274 CU_ASSERT(rc == 0); 4275 4276 spdk_delay_us(1000); 4277 poll_threads(); 4278 4279 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4280 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4281 4282 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4283 CU_ASSERT(nvme_ctrlr != NULL); 4284 4285 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4286 CU_ASSERT(bdev != NULL); 4287 4288 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4289 CU_ASSERT(nvme_ns != NULL); 4290 4291 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4292 ut_bdev_io_set_buf(bdev_io1); 4293 4294 ch = spdk_get_io_channel(bdev); 4295 SPDK_CU_ASSERT_FATAL(ch != NULL); 4296 4297 nbdev_ch = spdk_io_channel_get_ctx(ch); 4298 4299 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4300 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4301 4302 nvme_qpair = io_path->qpair; 4303 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4304 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4305 4306 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 4307 4308 /* If qpair is connected, I/O should succeed. */ 4309 bdev_io1->internal.in_submit_request = true; 4310 4311 bdev_nvme_submit_request(ch, bdev_io1); 4312 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4313 4314 poll_threads(); 4315 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4316 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 4317 4318 /* If ANA state of namespace is inaccessible, I/O should be queued. */ 4319 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4320 nbdev_ch->current_io_path = NULL; 4321 4322 bdev_io1->internal.in_submit_request = true; 4323 4324 bdev_nvme_submit_request(ch, bdev_io1); 4325 4326 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4327 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4328 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4329 4330 /* ANA state became accessible while I/O was queued. */ 4331 nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4332 4333 spdk_delay_us(1000000); 4334 4335 poll_thread_times(0, 1); 4336 4337 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4338 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 4339 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 4340 4341 poll_threads(); 4342 4343 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4344 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 4345 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4346 4347 free(bdev_io1); 4348 4349 spdk_put_io_channel(ch); 4350 4351 poll_threads(); 4352 4353 rc = bdev_nvme_delete("nvme0", &g_any_path); 4354 CU_ASSERT(rc == 0); 4355 4356 poll_threads(); 4357 spdk_delay_us(1000); 4358 poll_threads(); 4359 4360 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4361 } 4362 4363 static void 4364 test_retry_io_for_io_path_error(void) 4365 { 4366 struct nvme_path_id path1 = {}, path2 = {}; 4367 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 4368 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4369 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 4370 const int STRING_SIZE = 32; 4371 const char *attached_names[STRING_SIZE]; 4372 struct nvme_bdev *bdev; 4373 struct nvme_ns *nvme_ns1, *nvme_ns2; 4374 struct spdk_bdev_io *bdev_io; 4375 struct nvme_bdev_io *bio; 4376 struct spdk_io_channel *ch; 4377 struct nvme_bdev_channel *nbdev_ch; 4378 struct nvme_io_path *io_path1, *io_path2; 4379 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 4380 struct ut_nvme_req *req; 4381 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 4382 int rc; 4383 4384 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4385 ut_init_trid(&path1.trid); 4386 ut_init_trid2(&path2.trid); 4387 4388 g_opts.bdev_retry_count = 1; 4389 4390 set_thread(0); 4391 4392 g_ut_attach_ctrlr_status = 0; 4393 g_ut_attach_bdev_count = 1; 4394 4395 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 4396 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 4397 4398 ctrlr1->ns[0].uuid = &uuid1; 4399 4400 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 4401 attach_ctrlr_done, NULL, NULL, NULL, true); 4402 CU_ASSERT(rc == 0); 4403 4404 spdk_delay_us(1000); 4405 poll_threads(); 4406 4407 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4408 poll_threads(); 4409 4410 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4411 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4412 4413 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 4414 CU_ASSERT(nvme_ctrlr1 != NULL); 4415 4416 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4417 CU_ASSERT(bdev != NULL); 4418 4419 nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1); 4420 CU_ASSERT(nvme_ns1 != NULL); 4421 CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1)); 4422 4423 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4424 ut_bdev_io_set_buf(bdev_io); 4425 4426 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4427 4428 ch = spdk_get_io_channel(bdev); 4429 SPDK_CU_ASSERT_FATAL(ch != NULL); 4430 4431 nbdev_ch = spdk_io_channel_get_ctx(ch); 4432 4433 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 4434 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 4435 4436 nvme_qpair1 = io_path1->qpair; 4437 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 4438 SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL); 4439 4440 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4441 4442 /* I/O got a temporary I/O path error, but it should not retry if DNR is set. */ 4443 bdev_io->internal.in_submit_request = true; 4444 4445 bdev_nvme_submit_request(ch, bdev_io); 4446 4447 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4448 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4449 4450 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4451 SPDK_CU_ASSERT_FATAL(req != NULL); 4452 4453 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4454 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4455 req->cpl.status.dnr = 1; 4456 4457 poll_thread_times(0, 1); 4458 4459 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4460 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4461 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4462 4463 /* I/O got a temporary I/O path error, but it should succeed after retry. */ 4464 bdev_io->internal.in_submit_request = true; 4465 4466 bdev_nvme_submit_request(ch, bdev_io); 4467 4468 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4469 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4470 4471 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4472 SPDK_CU_ASSERT_FATAL(req != NULL); 4473 4474 req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR; 4475 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4476 4477 poll_thread_times(0, 1); 4478 4479 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4480 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4481 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4482 4483 poll_threads(); 4484 4485 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4486 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4487 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4488 4489 /* Add io_path2 dynamically, and create a multipath configuration. */ 4490 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 4491 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 4492 4493 ctrlr2->ns[0].uuid = &uuid1; 4494 4495 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 4496 attach_ctrlr_done, NULL, NULL, NULL, true); 4497 CU_ASSERT(rc == 0); 4498 4499 spdk_delay_us(1000); 4500 poll_threads(); 4501 4502 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4503 poll_threads(); 4504 4505 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 4506 CU_ASSERT(nvme_ctrlr2 != NULL); 4507 4508 nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2); 4509 CU_ASSERT(nvme_ns2 != NULL); 4510 CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2)); 4511 4512 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 4513 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 4514 4515 nvme_qpair2 = io_path2->qpair; 4516 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 4517 SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL); 4518 4519 /* I/O is submitted to io_path1, but qpair of io_path1 was disconnected 4520 * and deleted. Hence the I/O was aborted. But io_path2 is available. 4521 * So after a retry, I/O is submitted to io_path2 and should succeed. 4522 */ 4523 bdev_io->internal.in_submit_request = true; 4524 4525 bdev_nvme_submit_request(ch, bdev_io); 4526 4527 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 4528 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4529 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4530 4531 req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio); 4532 SPDK_CU_ASSERT_FATAL(req != NULL); 4533 4534 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION; 4535 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4536 4537 poll_thread_times(0, 1); 4538 4539 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 4540 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4541 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4542 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4543 4544 spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair); 4545 nvme_qpair1->qpair = NULL; 4546 4547 poll_threads(); 4548 4549 CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0); 4550 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4551 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4552 4553 free(bdev_io); 4554 4555 spdk_put_io_channel(ch); 4556 4557 poll_threads(); 4558 4559 rc = bdev_nvme_delete("nvme0", &g_any_path); 4560 CU_ASSERT(rc == 0); 4561 4562 poll_threads(); 4563 spdk_delay_us(1000); 4564 poll_threads(); 4565 4566 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4567 4568 g_opts.bdev_retry_count = 0; 4569 } 4570 4571 static void 4572 test_retry_io_count(void) 4573 { 4574 struct nvme_path_id path = {}; 4575 struct spdk_nvme_ctrlr *ctrlr; 4576 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4577 struct nvme_ctrlr *nvme_ctrlr; 4578 const int STRING_SIZE = 32; 4579 const char *attached_names[STRING_SIZE]; 4580 struct nvme_bdev *bdev; 4581 struct nvme_ns *nvme_ns; 4582 struct spdk_bdev_io *bdev_io; 4583 struct nvme_bdev_io *bio; 4584 struct spdk_io_channel *ch; 4585 struct nvme_bdev_channel *nbdev_ch; 4586 struct nvme_io_path *io_path; 4587 struct nvme_qpair *nvme_qpair; 4588 struct ut_nvme_req *req; 4589 int rc; 4590 4591 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4592 ut_init_trid(&path.trid); 4593 4594 set_thread(0); 4595 4596 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 4597 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4598 4599 g_ut_attach_ctrlr_status = 0; 4600 g_ut_attach_bdev_count = 1; 4601 4602 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4603 attach_ctrlr_done, NULL, NULL, NULL, false); 4604 CU_ASSERT(rc == 0); 4605 4606 spdk_delay_us(1000); 4607 poll_threads(); 4608 4609 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4610 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4611 4612 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4613 CU_ASSERT(nvme_ctrlr != NULL); 4614 4615 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4616 CU_ASSERT(bdev != NULL); 4617 4618 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4619 CU_ASSERT(nvme_ns != NULL); 4620 4621 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4622 ut_bdev_io_set_buf(bdev_io); 4623 4624 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4625 4626 ch = spdk_get_io_channel(bdev); 4627 SPDK_CU_ASSERT_FATAL(ch != NULL); 4628 4629 nbdev_ch = spdk_io_channel_get_ctx(ch); 4630 4631 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4632 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4633 4634 nvme_qpair = io_path->qpair; 4635 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4636 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4637 4638 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4639 4640 /* If I/O is aborted by request, it should not be retried. */ 4641 g_opts.bdev_retry_count = 1; 4642 4643 bdev_io->internal.in_submit_request = true; 4644 4645 bdev_nvme_submit_request(ch, bdev_io); 4646 4647 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4648 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4649 4650 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4651 SPDK_CU_ASSERT_FATAL(req != NULL); 4652 4653 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 4654 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4655 4656 poll_thread_times(0, 1); 4657 4658 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4659 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4660 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 4661 4662 /* If bio->retry_count is not less than g_opts.bdev_retry_count, 4663 * the failed I/O should not be retried. 4664 */ 4665 g_opts.bdev_retry_count = 4; 4666 4667 bdev_io->internal.in_submit_request = true; 4668 4669 bdev_nvme_submit_request(ch, bdev_io); 4670 4671 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4672 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4673 4674 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4675 SPDK_CU_ASSERT_FATAL(req != NULL); 4676 4677 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4678 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4679 bio->retry_count = 4; 4680 4681 poll_thread_times(0, 1); 4682 4683 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4684 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4685 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR); 4686 4687 /* If g_opts.bdev_retry_count is -1, the failed I/O always should be retried. */ 4688 g_opts.bdev_retry_count = -1; 4689 4690 bdev_io->internal.in_submit_request = true; 4691 4692 bdev_nvme_submit_request(ch, bdev_io); 4693 4694 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4695 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4696 4697 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4698 SPDK_CU_ASSERT_FATAL(req != NULL); 4699 4700 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4701 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4702 bio->retry_count = 4; 4703 4704 poll_thread_times(0, 1); 4705 4706 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4707 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4708 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4709 4710 poll_threads(); 4711 4712 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4713 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4714 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4715 4716 /* If bio->retry_count is less than g_opts.bdev_retry_count, 4717 * the failed I/O should be retried. 4718 */ 4719 g_opts.bdev_retry_count = 4; 4720 4721 bdev_io->internal.in_submit_request = true; 4722 4723 bdev_nvme_submit_request(ch, bdev_io); 4724 4725 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4726 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4727 4728 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4729 SPDK_CU_ASSERT_FATAL(req != NULL); 4730 4731 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 4732 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 4733 bio->retry_count = 3; 4734 4735 poll_thread_times(0, 1); 4736 4737 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4738 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4739 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4740 4741 poll_threads(); 4742 4743 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4744 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4745 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4746 4747 free(bdev_io); 4748 4749 spdk_put_io_channel(ch); 4750 4751 poll_threads(); 4752 4753 rc = bdev_nvme_delete("nvme0", &g_any_path); 4754 CU_ASSERT(rc == 0); 4755 4756 poll_threads(); 4757 spdk_delay_us(1000); 4758 poll_threads(); 4759 4760 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4761 4762 g_opts.bdev_retry_count = 0; 4763 } 4764 4765 static void 4766 test_concurrent_read_ana_log_page(void) 4767 { 4768 struct spdk_nvme_transport_id trid = {}; 4769 struct spdk_nvme_ctrlr *ctrlr; 4770 struct nvme_ctrlr *nvme_ctrlr; 4771 const int STRING_SIZE = 32; 4772 const char *attached_names[STRING_SIZE]; 4773 int rc; 4774 4775 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4776 ut_init_trid(&trid); 4777 4778 set_thread(0); 4779 4780 ctrlr = ut_attach_ctrlr(&trid, 1, true, false); 4781 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4782 4783 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 4784 4785 g_ut_attach_ctrlr_status = 0; 4786 g_ut_attach_bdev_count = 1; 4787 4788 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 4789 attach_ctrlr_done, NULL, NULL, NULL, false); 4790 CU_ASSERT(rc == 0); 4791 4792 spdk_delay_us(1000); 4793 poll_threads(); 4794 4795 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4796 poll_threads(); 4797 4798 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 4799 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 4800 4801 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4802 4803 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4804 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4805 4806 /* Following read request should be rejected. */ 4807 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4808 4809 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4810 4811 set_thread(1); 4812 4813 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4814 4815 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 4816 4817 /* Reset request while reading ANA log page should not be rejected. */ 4818 rc = bdev_nvme_reset(nvme_ctrlr); 4819 CU_ASSERT(rc == 0); 4820 4821 poll_threads(); 4822 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4823 poll_threads(); 4824 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4825 poll_threads(); 4826 4827 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4828 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 4829 4830 /* Read ANA log page while resetting ctrlr should be rejected. */ 4831 rc = bdev_nvme_reset(nvme_ctrlr); 4832 CU_ASSERT(rc == 0); 4833 4834 nvme_ctrlr_read_ana_log_page(nvme_ctrlr); 4835 4836 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4837 4838 poll_threads(); 4839 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4840 poll_threads(); 4841 4842 set_thread(0); 4843 4844 rc = bdev_nvme_delete("nvme0", &g_any_path); 4845 CU_ASSERT(rc == 0); 4846 4847 poll_threads(); 4848 spdk_delay_us(1000); 4849 poll_threads(); 4850 4851 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 4852 } 4853 4854 static void 4855 test_retry_io_for_ana_error(void) 4856 { 4857 struct nvme_path_id path = {}; 4858 struct spdk_nvme_ctrlr *ctrlr; 4859 struct nvme_bdev_ctrlr *nbdev_ctrlr; 4860 struct nvme_ctrlr *nvme_ctrlr; 4861 const int STRING_SIZE = 32; 4862 const char *attached_names[STRING_SIZE]; 4863 struct nvme_bdev *bdev; 4864 struct nvme_ns *nvme_ns; 4865 struct spdk_bdev_io *bdev_io; 4866 struct nvme_bdev_io *bio; 4867 struct spdk_io_channel *ch; 4868 struct nvme_bdev_channel *nbdev_ch; 4869 struct nvme_io_path *io_path; 4870 struct nvme_qpair *nvme_qpair; 4871 struct ut_nvme_req *req; 4872 uint64_t now; 4873 int rc; 4874 4875 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 4876 ut_init_trid(&path.trid); 4877 4878 g_opts.bdev_retry_count = 1; 4879 4880 set_thread(0); 4881 4882 ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false); 4883 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 4884 4885 g_ut_attach_ctrlr_status = 0; 4886 g_ut_attach_bdev_count = 1; 4887 4888 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 4889 attach_ctrlr_done, NULL, NULL, NULL, false); 4890 CU_ASSERT(rc == 0); 4891 4892 spdk_delay_us(1000); 4893 poll_threads(); 4894 4895 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4896 poll_threads(); 4897 4898 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 4899 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 4900 4901 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 4902 CU_ASSERT(nvme_ctrlr != NULL); 4903 4904 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 4905 CU_ASSERT(bdev != NULL); 4906 4907 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 4908 CU_ASSERT(nvme_ns != NULL); 4909 4910 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 4911 ut_bdev_io_set_buf(bdev_io); 4912 4913 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 4914 4915 ch = spdk_get_io_channel(bdev); 4916 SPDK_CU_ASSERT_FATAL(ch != NULL); 4917 4918 nbdev_ch = spdk_io_channel_get_ctx(ch); 4919 4920 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 4921 SPDK_CU_ASSERT_FATAL(io_path != NULL); 4922 4923 nvme_qpair = io_path->qpair; 4924 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 4925 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 4926 4927 now = spdk_get_ticks(); 4928 4929 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 4930 4931 /* If I/O got ANA error, it should be queued, the corresponding namespace 4932 * should be freezed and its ANA state should be updated. 4933 */ 4934 bdev_io->internal.in_submit_request = true; 4935 4936 bdev_nvme_submit_request(ch, bdev_io); 4937 4938 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 4939 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4940 4941 req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio); 4942 SPDK_CU_ASSERT_FATAL(req != NULL); 4943 4944 nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 4945 req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE; 4946 req->cpl.status.sct = SPDK_NVME_SCT_PATH; 4947 4948 poll_thread_times(0, 1); 4949 4950 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4951 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4952 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4953 /* I/O should be retried immediately. */ 4954 CU_ASSERT(bio->retry_ticks == now); 4955 CU_ASSERT(nvme_ns->ana_state_updating == true); 4956 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true); 4957 4958 poll_threads(); 4959 4960 /* Namespace is inaccessible, and hence I/O should be queued again. */ 4961 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4962 CU_ASSERT(bdev_io->internal.in_submit_request == true); 4963 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 4964 /* I/O should be retried after a second if no I/O path was found but 4965 * any I/O path may become available. 4966 */ 4967 CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz()); 4968 4969 /* Namespace should be unfreezed after completing to update its ANA state. */ 4970 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 4971 poll_threads(); 4972 4973 CU_ASSERT(nvme_ns->ana_state_updating == false); 4974 CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 4975 CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false); 4976 4977 /* Retry the queued I/O should succeed. */ 4978 spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us); 4979 poll_threads(); 4980 4981 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 4982 CU_ASSERT(bdev_io->internal.in_submit_request == false); 4983 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 4984 4985 free(bdev_io); 4986 4987 spdk_put_io_channel(ch); 4988 4989 poll_threads(); 4990 4991 rc = bdev_nvme_delete("nvme0", &g_any_path); 4992 CU_ASSERT(rc == 0); 4993 4994 poll_threads(); 4995 spdk_delay_us(1000); 4996 poll_threads(); 4997 4998 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 4999 5000 g_opts.bdev_retry_count = 0; 5001 } 5002 5003 static void 5004 test_check_io_error_resiliency_params(void) 5005 { 5006 /* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and 5007 * 3rd parameter is fast_io_fail_timeout_sec. 5008 */ 5009 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false); 5010 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false); 5011 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false); 5012 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false); 5013 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false); 5014 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true); 5015 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true); 5016 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true); 5017 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true); 5018 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true); 5019 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false); 5020 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false); 5021 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false); 5022 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false); 5023 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true); 5024 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true); 5025 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true); 5026 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true); 5027 CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true); 5028 } 5029 5030 static void 5031 test_retry_io_if_ctrlr_is_resetting(void) 5032 { 5033 struct nvme_path_id path = {}; 5034 struct nvme_ctrlr_opts opts = {}; 5035 struct spdk_nvme_ctrlr *ctrlr; 5036 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5037 struct nvme_ctrlr *nvme_ctrlr; 5038 const int STRING_SIZE = 32; 5039 const char *attached_names[STRING_SIZE]; 5040 struct nvme_bdev *bdev; 5041 struct nvme_ns *nvme_ns; 5042 struct spdk_bdev_io *bdev_io1, *bdev_io2; 5043 struct spdk_io_channel *ch; 5044 struct nvme_bdev_channel *nbdev_ch; 5045 struct nvme_io_path *io_path; 5046 struct nvme_qpair *nvme_qpair; 5047 int rc; 5048 5049 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5050 ut_init_trid(&path.trid); 5051 5052 set_thread(0); 5053 5054 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5055 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5056 5057 g_ut_attach_ctrlr_status = 0; 5058 g_ut_attach_bdev_count = 1; 5059 5060 opts.ctrlr_loss_timeout_sec = -1; 5061 opts.reconnect_delay_sec = 1; 5062 5063 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5064 attach_ctrlr_done, NULL, NULL, &opts, false); 5065 CU_ASSERT(rc == 0); 5066 5067 spdk_delay_us(1000); 5068 poll_threads(); 5069 5070 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5071 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5072 5073 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5074 CU_ASSERT(nvme_ctrlr != NULL); 5075 5076 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5077 CU_ASSERT(bdev != NULL); 5078 5079 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5080 CU_ASSERT(nvme_ns != NULL); 5081 5082 bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5083 ut_bdev_io_set_buf(bdev_io1); 5084 5085 bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 5086 ut_bdev_io_set_buf(bdev_io2); 5087 5088 ch = spdk_get_io_channel(bdev); 5089 SPDK_CU_ASSERT_FATAL(ch != NULL); 5090 5091 nbdev_ch = spdk_io_channel_get_ctx(ch); 5092 5093 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5094 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5095 5096 nvme_qpair = io_path->qpair; 5097 SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL); 5098 SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL); 5099 5100 bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch; 5101 bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch; 5102 5103 /* If qpair is connected, I/O should succeed. */ 5104 bdev_io1->internal.in_submit_request = true; 5105 5106 bdev_nvme_submit_request(ch, bdev_io1); 5107 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5108 5109 poll_threads(); 5110 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5111 CU_ASSERT(bdev_io1->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 5112 5113 /* If qpair is disconnected, it is freed and then reconnected via resetting 5114 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 5115 * while resetting the nvme_ctrlr. 5116 */ 5117 nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 5118 ctrlr->is_failed = true; 5119 5120 poll_thread_times(0, 5); 5121 5122 CU_ASSERT(nvme_qpair->qpair == NULL); 5123 CU_ASSERT(nvme_ctrlr->resetting == true); 5124 CU_ASSERT(ctrlr->is_failed == false); 5125 5126 bdev_io1->internal.in_submit_request = true; 5127 5128 bdev_nvme_submit_request(ch, bdev_io1); 5129 5130 spdk_delay_us(1); 5131 5132 bdev_io2->internal.in_submit_request = true; 5133 5134 bdev_nvme_submit_request(ch, bdev_io2); 5135 5136 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5137 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5138 CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5139 CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link)); 5140 5141 poll_threads(); 5142 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5143 poll_threads(); 5144 5145 CU_ASSERT(nvme_qpair->qpair != NULL); 5146 CU_ASSERT(nvme_ctrlr->resetting == false); 5147 5148 spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us); 5149 5150 poll_thread_times(0, 1); 5151 5152 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5153 CU_ASSERT(bdev_io1->internal.in_submit_request == true); 5154 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5155 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5156 5157 poll_threads(); 5158 5159 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5160 CU_ASSERT(bdev_io1->internal.in_submit_request == false); 5161 CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5162 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5163 CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5164 5165 spdk_delay_us(1); 5166 5167 poll_thread_times(0, 1); 5168 5169 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1); 5170 CU_ASSERT(bdev_io2->internal.in_submit_request == true); 5171 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5172 5173 poll_threads(); 5174 5175 CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0); 5176 CU_ASSERT(bdev_io2->internal.in_submit_request == false); 5177 CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 5178 5179 free(bdev_io1); 5180 free(bdev_io2); 5181 5182 spdk_put_io_channel(ch); 5183 5184 poll_threads(); 5185 5186 rc = bdev_nvme_delete("nvme0", &g_any_path); 5187 CU_ASSERT(rc == 0); 5188 5189 poll_threads(); 5190 spdk_delay_us(1000); 5191 poll_threads(); 5192 5193 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 5194 } 5195 5196 static void 5197 test_reconnect_ctrlr(void) 5198 { 5199 struct spdk_nvme_transport_id trid = {}; 5200 struct spdk_nvme_ctrlr ctrlr = {}; 5201 struct nvme_ctrlr *nvme_ctrlr; 5202 struct spdk_io_channel *ch1, *ch2; 5203 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 5204 int rc; 5205 5206 ut_init_trid(&trid); 5207 TAILQ_INIT(&ctrlr.active_io_qpairs); 5208 5209 set_thread(0); 5210 5211 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 5212 CU_ASSERT(rc == 0); 5213 5214 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5215 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5216 5217 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2; 5218 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5219 5220 ch1 = spdk_get_io_channel(nvme_ctrlr); 5221 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 5222 5223 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 5224 CU_ASSERT(ctrlr_ch1->qpair != NULL); 5225 5226 set_thread(1); 5227 5228 ch2 = spdk_get_io_channel(nvme_ctrlr); 5229 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 5230 5231 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 5232 5233 /* Reset starts from thread 1. */ 5234 set_thread(1); 5235 5236 /* The reset should fail and a reconnect timer should be registered. */ 5237 ctrlr.fail_reset = true; 5238 ctrlr.is_failed = true; 5239 5240 rc = bdev_nvme_reset(nvme_ctrlr); 5241 CU_ASSERT(rc == 0); 5242 CU_ASSERT(nvme_ctrlr->resetting == true); 5243 CU_ASSERT(ctrlr.is_failed == true); 5244 5245 poll_threads(); 5246 5247 CU_ASSERT(nvme_ctrlr->resetting == false); 5248 CU_ASSERT(ctrlr.is_failed == false); 5249 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5250 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5251 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5252 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5253 5254 /* Then a reconnect retry should suceeed. */ 5255 ctrlr.fail_reset = false; 5256 5257 spdk_delay_us(SPDK_SEC_TO_USEC); 5258 poll_thread_times(0, 1); 5259 5260 CU_ASSERT(nvme_ctrlr->resetting == true); 5261 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5262 5263 poll_threads(); 5264 5265 CU_ASSERT(nvme_ctrlr->resetting == false); 5266 CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL); 5267 CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL); 5268 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5269 5270 /* The reset should fail and a reconnect timer should be registered. */ 5271 ctrlr.fail_reset = true; 5272 ctrlr.is_failed = true; 5273 5274 rc = bdev_nvme_reset(nvme_ctrlr); 5275 CU_ASSERT(rc == 0); 5276 CU_ASSERT(nvme_ctrlr->resetting == true); 5277 CU_ASSERT(ctrlr.is_failed == true); 5278 5279 poll_threads(); 5280 5281 CU_ASSERT(nvme_ctrlr->resetting == false); 5282 CU_ASSERT(ctrlr.is_failed == false); 5283 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5284 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5285 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5286 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5287 5288 /* Then a reconnect retry should still fail. */ 5289 spdk_delay_us(SPDK_SEC_TO_USEC); 5290 poll_thread_times(0, 1); 5291 5292 CU_ASSERT(nvme_ctrlr->resetting == true); 5293 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5294 5295 poll_threads(); 5296 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5297 poll_threads(); 5298 5299 CU_ASSERT(nvme_ctrlr->resetting == false); 5300 CU_ASSERT(ctrlr.is_failed == false); 5301 CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL); 5302 CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL); 5303 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5304 5305 /* Then a reconnect retry should still fail and the ctrlr should be deleted. */ 5306 spdk_delay_us(SPDK_SEC_TO_USEC); 5307 poll_threads(); 5308 5309 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5310 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5311 CU_ASSERT(nvme_ctrlr->destruct == true); 5312 5313 spdk_put_io_channel(ch2); 5314 5315 set_thread(0); 5316 5317 spdk_put_io_channel(ch1); 5318 5319 poll_threads(); 5320 spdk_delay_us(1000); 5321 poll_threads(); 5322 5323 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5324 } 5325 5326 static struct nvme_path_id * 5327 ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr, 5328 const struct spdk_nvme_transport_id *trid) 5329 { 5330 struct nvme_path_id *p; 5331 5332 TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) { 5333 if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) { 5334 break; 5335 } 5336 } 5337 5338 return p; 5339 } 5340 5341 static void 5342 test_retry_failover_ctrlr(void) 5343 { 5344 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 5345 struct spdk_nvme_ctrlr ctrlr = {}; 5346 struct nvme_ctrlr *nvme_ctrlr = NULL; 5347 struct nvme_path_id *path_id1, *path_id2, *path_id3; 5348 struct spdk_io_channel *ch; 5349 struct nvme_ctrlr_channel *ctrlr_ch; 5350 int rc; 5351 5352 ut_init_trid(&trid1); 5353 ut_init_trid2(&trid2); 5354 ut_init_trid3(&trid3); 5355 TAILQ_INIT(&ctrlr.active_io_qpairs); 5356 5357 set_thread(0); 5358 5359 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL); 5360 CU_ASSERT(rc == 0); 5361 5362 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 5363 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 5364 5365 nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1; 5366 nvme_ctrlr->opts.reconnect_delay_sec = 1; 5367 5368 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 5369 CU_ASSERT(rc == 0); 5370 5371 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3); 5372 CU_ASSERT(rc == 0); 5373 5374 ch = spdk_get_io_channel(nvme_ctrlr); 5375 SPDK_CU_ASSERT_FATAL(ch != NULL); 5376 5377 ctrlr_ch = spdk_io_channel_get_ctx(ch); 5378 5379 path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1); 5380 SPDK_CU_ASSERT_FATAL(path_id1 != NULL); 5381 CU_ASSERT(path_id1->is_failed == false); 5382 CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id); 5383 5384 /* If reset failed and reconnect is scheduled, path_id is switched from trid1 to trid2. */ 5385 ctrlr.fail_reset = true; 5386 ctrlr.is_failed = true; 5387 5388 rc = bdev_nvme_reset(nvme_ctrlr); 5389 CU_ASSERT(rc == 0); 5390 5391 poll_threads(); 5392 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5393 poll_threads(); 5394 5395 CU_ASSERT(nvme_ctrlr->resetting == false); 5396 CU_ASSERT(ctrlr.is_failed == false); 5397 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5398 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5399 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true); 5400 CU_ASSERT(path_id1->is_failed == true); 5401 5402 path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2); 5403 SPDK_CU_ASSERT_FATAL(path_id2 != NULL); 5404 CU_ASSERT(path_id2->is_failed == false); 5405 CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id); 5406 5407 /* If we remove trid2 while reconnect is scheduled, trid2 is removed and path_id is 5408 * switched to trid3 but reset is not started. 5409 */ 5410 rc = bdev_nvme_failover(nvme_ctrlr, true); 5411 CU_ASSERT(rc == 0); 5412 5413 CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL); 5414 5415 path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3); 5416 SPDK_CU_ASSERT_FATAL(path_id3 != NULL); 5417 CU_ASSERT(path_id3->is_failed == false); 5418 CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id); 5419 5420 CU_ASSERT(nvme_ctrlr->resetting == false); 5421 5422 /* If reconnect succeeds, trid3 should be the active path_id */ 5423 ctrlr.fail_reset = false; 5424 5425 spdk_delay_us(SPDK_SEC_TO_USEC); 5426 poll_thread_times(0, 1); 5427 5428 CU_ASSERT(nvme_ctrlr->resetting == true); 5429 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL); 5430 5431 poll_threads(); 5432 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5433 poll_threads(); 5434 5435 CU_ASSERT(path_id3->is_failed == false); 5436 CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id); 5437 CU_ASSERT(nvme_ctrlr->resetting == false); 5438 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 5439 CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false); 5440 5441 spdk_put_io_channel(ch); 5442 5443 poll_threads(); 5444 5445 rc = bdev_nvme_delete("nvme0", &g_any_path); 5446 CU_ASSERT(rc == 0); 5447 5448 poll_threads(); 5449 spdk_delay_us(1000); 5450 poll_threads(); 5451 5452 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5453 } 5454 5455 static void 5456 test_fail_path(void) 5457 { 5458 struct nvme_path_id path = {}; 5459 struct nvme_ctrlr_opts opts = {}; 5460 struct spdk_nvme_ctrlr *ctrlr; 5461 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5462 struct nvme_ctrlr *nvme_ctrlr; 5463 const int STRING_SIZE = 32; 5464 const char *attached_names[STRING_SIZE]; 5465 struct nvme_bdev *bdev; 5466 struct nvme_ns *nvme_ns; 5467 struct spdk_bdev_io *bdev_io; 5468 struct spdk_io_channel *ch; 5469 struct nvme_bdev_channel *nbdev_ch; 5470 struct nvme_io_path *io_path; 5471 struct nvme_ctrlr_channel *ctrlr_ch; 5472 int rc; 5473 5474 /* The test scenario is the following. 5475 * - We set ctrlr_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec. 5476 * - Rresetting a ctrlr fails and reconnecting the ctrlr is repeated. 5477 * - While reconnecting the ctrlr, an I/O is submitted and queued. 5478 * - The I/O waits until the ctrlr is recovered but ctrlr_fail_timeout_sec 5479 * comes first. The queued I/O is failed. 5480 * - After ctrlr_fail_timeout_sec, any I/O is failed immediately. 5481 * - Then ctrlr_loss_timeout_sec comes and the ctrlr is deleted. 5482 */ 5483 5484 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5485 ut_init_trid(&path.trid); 5486 5487 set_thread(0); 5488 5489 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5490 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5491 5492 g_ut_attach_ctrlr_status = 0; 5493 g_ut_attach_bdev_count = 1; 5494 5495 opts.ctrlr_loss_timeout_sec = 4; 5496 opts.reconnect_delay_sec = 1; 5497 opts.fast_io_fail_timeout_sec = 2; 5498 5499 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5500 attach_ctrlr_done, NULL, NULL, &opts, false); 5501 CU_ASSERT(rc == 0); 5502 5503 spdk_delay_us(1000); 5504 poll_threads(); 5505 5506 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5507 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5508 5509 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5510 CU_ASSERT(nvme_ctrlr != NULL); 5511 5512 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5513 CU_ASSERT(bdev != NULL); 5514 5515 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5516 CU_ASSERT(nvme_ns != NULL); 5517 5518 ch = spdk_get_io_channel(bdev); 5519 SPDK_CU_ASSERT_FATAL(ch != NULL); 5520 5521 nbdev_ch = spdk_io_channel_get_ctx(ch); 5522 5523 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5524 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5525 5526 ctrlr_ch = io_path->qpair->ctrlr_ch; 5527 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5528 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5529 5530 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5531 ut_bdev_io_set_buf(bdev_io); 5532 5533 5534 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5535 ctrlr->fail_reset = true; 5536 ctrlr->is_failed = true; 5537 5538 rc = bdev_nvme_reset(nvme_ctrlr); 5539 CU_ASSERT(rc == 0); 5540 CU_ASSERT(nvme_ctrlr->resetting == true); 5541 CU_ASSERT(ctrlr->is_failed == true); 5542 5543 poll_threads(); 5544 5545 CU_ASSERT(nvme_ctrlr->resetting == false); 5546 CU_ASSERT(ctrlr->is_failed == false); 5547 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5548 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5549 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5550 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5551 5552 /* I/O should be queued. */ 5553 bdev_io->internal.in_submit_request = true; 5554 5555 bdev_nvme_submit_request(ch, bdev_io); 5556 5557 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5558 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5559 5560 /* After a second, the I/O should be still queued and the ctrlr should be 5561 * still recovering. 5562 */ 5563 spdk_delay_us(SPDK_SEC_TO_USEC); 5564 poll_threads(); 5565 5566 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5567 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5568 5569 CU_ASSERT(nvme_ctrlr->resetting == false); 5570 CU_ASSERT(ctrlr->is_failed == false); 5571 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5572 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5573 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5574 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5575 5576 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5577 5578 /* After two seconds, ctrlr_fail_timeout_sec should expire. */ 5579 spdk_delay_us(SPDK_SEC_TO_USEC); 5580 poll_threads(); 5581 5582 CU_ASSERT(nvme_ctrlr->resetting == false); 5583 CU_ASSERT(ctrlr->is_failed == false); 5584 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5585 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5586 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5587 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true); 5588 5589 /* Then within a second, pending I/O should be failed. */ 5590 spdk_delay_us(SPDK_SEC_TO_USEC); 5591 poll_threads(); 5592 5593 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5594 poll_threads(); 5595 5596 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5597 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5598 CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list)); 5599 5600 /* Another I/O submission should be failed immediately. */ 5601 bdev_io->internal.in_submit_request = true; 5602 5603 bdev_nvme_submit_request(ch, bdev_io); 5604 5605 CU_ASSERT(bdev_io->internal.in_submit_request == false); 5606 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 5607 5608 /* After four seconds, path_loss_timeout_sec should expire and ctrlr should 5609 * be deleted. 5610 */ 5611 spdk_delay_us(SPDK_SEC_TO_USEC); 5612 poll_threads(); 5613 5614 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5615 poll_threads(); 5616 5617 CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0")); 5618 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true); 5619 CU_ASSERT(nvme_ctrlr->destruct == true); 5620 5621 spdk_put_io_channel(ch); 5622 5623 poll_threads(); 5624 spdk_delay_us(1000); 5625 poll_threads(); 5626 5627 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5628 5629 free(bdev_io); 5630 } 5631 5632 static void 5633 test_nvme_ns_cmp(void) 5634 { 5635 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}; 5636 5637 nvme_ns1.id = 0; 5638 nvme_ns2.id = UINT32_MAX; 5639 5640 CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0); 5641 CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0); 5642 } 5643 5644 static void 5645 test_ana_transition(void) 5646 { 5647 struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, }; 5648 struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, }; 5649 struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, }; 5650 struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, }; 5651 5652 /* case 1: ANA transition timedout is canceled. */ 5653 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5654 nvme_ns.ana_transition_timedout = true; 5655 5656 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5657 5658 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5659 5660 CU_ASSERT(nvme_ns.ana_transition_timedout == false); 5661 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5662 5663 /* case 2: ANATT timer is kept. */ 5664 nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5665 nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout, 5666 &nvme_ns, 5667 ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5668 5669 desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5670 5671 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5672 5673 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5674 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 5675 5676 /* case 3: ANATT timer is stopped. */ 5677 desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5678 5679 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5680 5681 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5682 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 5683 5684 /* ANATT timer is started. */ 5685 desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE; 5686 5687 _nvme_ns_set_ana_state(&nvme_ns, &desc); 5688 5689 CU_ASSERT(nvme_ns.anatt_timer != NULL); 5690 CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE); 5691 5692 /* ANATT timer is expired. */ 5693 spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC); 5694 5695 poll_threads(); 5696 5697 CU_ASSERT(nvme_ns.anatt_timer == NULL); 5698 CU_ASSERT(nvme_ns.ana_transition_timedout == true); 5699 } 5700 5701 static void 5702 _set_preferred_path_cb(void *cb_arg, int rc) 5703 { 5704 bool *done = cb_arg; 5705 5706 *done = true; 5707 } 5708 5709 static void 5710 test_set_preferred_path(void) 5711 { 5712 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 5713 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 5714 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5715 const int STRING_SIZE = 32; 5716 const char *attached_names[STRING_SIZE]; 5717 struct nvme_bdev *bdev; 5718 struct spdk_io_channel *ch; 5719 struct nvme_bdev_channel *nbdev_ch; 5720 struct nvme_io_path *io_path; 5721 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5722 const struct spdk_nvme_ctrlr_data *cdata; 5723 bool done; 5724 int rc; 5725 5726 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5727 ut_init_trid(&path1.trid); 5728 ut_init_trid2(&path2.trid); 5729 ut_init_trid3(&path3.trid); 5730 g_ut_attach_ctrlr_status = 0; 5731 g_ut_attach_bdev_count = 1; 5732 5733 set_thread(0); 5734 5735 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 5736 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 5737 5738 ctrlr1->ns[0].uuid = &uuid1; 5739 5740 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 5741 attach_ctrlr_done, NULL, NULL, NULL, true); 5742 CU_ASSERT(rc == 0); 5743 5744 spdk_delay_us(1000); 5745 poll_threads(); 5746 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5747 poll_threads(); 5748 5749 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 5750 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 5751 5752 ctrlr2->ns[0].uuid = &uuid1; 5753 5754 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 5755 attach_ctrlr_done, NULL, NULL, NULL, true); 5756 CU_ASSERT(rc == 0); 5757 5758 spdk_delay_us(1000); 5759 poll_threads(); 5760 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5761 poll_threads(); 5762 5763 ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true); 5764 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 5765 5766 ctrlr3->ns[0].uuid = &uuid1; 5767 5768 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 5769 attach_ctrlr_done, NULL, NULL, NULL, true); 5770 CU_ASSERT(rc == 0); 5771 5772 spdk_delay_us(1000); 5773 poll_threads(); 5774 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 5775 poll_threads(); 5776 5777 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5778 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5779 5780 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5781 SPDK_CU_ASSERT_FATAL(bdev != NULL); 5782 5783 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 5784 5785 ch = spdk_get_io_channel(bdev); 5786 SPDK_CU_ASSERT_FATAL(ch != NULL); 5787 nbdev_ch = spdk_io_channel_get_ctx(ch); 5788 5789 io_path = bdev_nvme_find_io_path(nbdev_ch); 5790 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5791 5792 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 5793 5794 /* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path() 5795 * should return io_path to ctrlr2. 5796 */ 5797 5798 cdata = spdk_nvme_ctrlr_get_data(ctrlr2); 5799 done = false; 5800 5801 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5802 5803 poll_threads(); 5804 CU_ASSERT(done == true); 5805 5806 io_path = bdev_nvme_find_io_path(nbdev_ch); 5807 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5808 5809 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 5810 5811 /* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is 5812 * acquired, find_io_path() should return io_path to ctrlr3. 5813 */ 5814 5815 spdk_put_io_channel(ch); 5816 5817 poll_threads(); 5818 5819 cdata = spdk_nvme_ctrlr_get_data(ctrlr3); 5820 done = false; 5821 5822 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 5823 5824 poll_threads(); 5825 CU_ASSERT(done == true); 5826 5827 ch = spdk_get_io_channel(bdev); 5828 SPDK_CU_ASSERT_FATAL(ch != NULL); 5829 nbdev_ch = spdk_io_channel_get_ctx(ch); 5830 5831 io_path = bdev_nvme_find_io_path(nbdev_ch); 5832 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5833 5834 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3); 5835 5836 spdk_put_io_channel(ch); 5837 5838 poll_threads(); 5839 5840 rc = bdev_nvme_delete("nvme0", &g_any_path); 5841 CU_ASSERT(rc == 0); 5842 5843 poll_threads(); 5844 spdk_delay_us(1000); 5845 poll_threads(); 5846 5847 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 5848 } 5849 5850 static void 5851 test_find_next_io_path(void) 5852 { 5853 struct nvme_bdev_channel nbdev_ch = { 5854 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 5855 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 5856 .mp_selector = BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 5857 }; 5858 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 5859 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 5860 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 5861 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 5862 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 5863 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 5864 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 5865 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 5866 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 5867 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 5868 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 5869 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 5870 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 5871 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 5872 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 5873 5874 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 5875 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 5876 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 5877 5878 /* test the case when nbdev_ch->current_io_path is filled, the case of current_io_path = NULL 5879 * is covered in test_find_io_path. 5880 */ 5881 5882 nbdev_ch.current_io_path = &io_path2; 5883 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5884 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5885 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5886 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5887 5888 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5889 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5890 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5891 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5892 5893 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5894 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5895 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5896 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5897 5898 nbdev_ch.current_io_path = &io_path3; 5899 nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5900 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5901 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5902 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5903 5904 /* Test if next io_path is selected according to rr_min_io */ 5905 5906 nbdev_ch.current_io_path = NULL; 5907 nbdev_ch.rr_min_io = 2; 5908 nbdev_ch.rr_counter = 0; 5909 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5910 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5911 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5912 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5913 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5914 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5915 5916 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5917 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5918 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5919 } 5920 5921 static void 5922 test_find_io_path_min_qd(void) 5923 { 5924 struct nvme_bdev_channel nbdev_ch = { 5925 .io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list), 5926 .mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 5927 .mp_selector = BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, 5928 }; 5929 struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {}; 5930 struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {}; 5931 struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }; 5932 struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, }; 5933 struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, }; 5934 struct nvme_ctrlr_channel ctrlr_ch1 = {}; 5935 struct nvme_ctrlr_channel ctrlr_ch2 = {}; 5936 struct nvme_ctrlr_channel ctrlr_ch3 = {}; 5937 struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, }; 5938 struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, }; 5939 struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, }; 5940 struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {}; 5941 struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, }; 5942 struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, }; 5943 struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, }; 5944 5945 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq); 5946 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq); 5947 STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq); 5948 5949 /* Test if the minumum io_outstanding or the ANA optimized state is 5950 * prioritized when using least queue depth selector 5951 */ 5952 qpair1.num_outstanding_reqs = 2; 5953 qpair2.num_outstanding_reqs = 1; 5954 qpair3.num_outstanding_reqs = 0; 5955 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5956 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5957 nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5958 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5959 5960 nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5961 nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 5962 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5963 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5964 5965 nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5966 nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 5967 nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 5968 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2); 5969 5970 qpair2.num_outstanding_reqs = 4; 5971 CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1); 5972 } 5973 5974 static void 5975 test_disable_auto_failback(void) 5976 { 5977 struct nvme_path_id path1 = {}, path2 = {}; 5978 struct nvme_ctrlr_opts opts = {}; 5979 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 5980 struct nvme_bdev_ctrlr *nbdev_ctrlr; 5981 struct nvme_ctrlr *nvme_ctrlr1; 5982 const int STRING_SIZE = 32; 5983 const char *attached_names[STRING_SIZE]; 5984 struct nvme_bdev *bdev; 5985 struct spdk_io_channel *ch; 5986 struct nvme_bdev_channel *nbdev_ch; 5987 struct nvme_io_path *io_path; 5988 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 5989 const struct spdk_nvme_ctrlr_data *cdata; 5990 bool done; 5991 int rc; 5992 5993 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5994 ut_init_trid(&path1.trid); 5995 ut_init_trid2(&path2.trid); 5996 g_ut_attach_ctrlr_status = 0; 5997 g_ut_attach_bdev_count = 1; 5998 5999 g_opts.disable_auto_failback = true; 6000 6001 opts.ctrlr_loss_timeout_sec = -1; 6002 opts.reconnect_delay_sec = 1; 6003 6004 set_thread(0); 6005 6006 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6007 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6008 6009 ctrlr1->ns[0].uuid = &uuid1; 6010 6011 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6012 attach_ctrlr_done, NULL, NULL, &opts, true); 6013 CU_ASSERT(rc == 0); 6014 6015 spdk_delay_us(1000); 6016 poll_threads(); 6017 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6018 poll_threads(); 6019 6020 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6021 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6022 6023 ctrlr2->ns[0].uuid = &uuid1; 6024 6025 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6026 attach_ctrlr_done, NULL, NULL, &opts, true); 6027 CU_ASSERT(rc == 0); 6028 6029 spdk_delay_us(1000); 6030 poll_threads(); 6031 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6032 poll_threads(); 6033 6034 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6035 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6036 6037 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6038 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6039 6040 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6041 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6042 6043 /* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */ 6044 6045 ch = spdk_get_io_channel(bdev); 6046 SPDK_CU_ASSERT_FATAL(ch != NULL); 6047 nbdev_ch = spdk_io_channel_get_ctx(ch); 6048 6049 io_path = bdev_nvme_find_io_path(nbdev_ch); 6050 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6051 6052 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6053 6054 /* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */ 6055 ctrlr1->fail_reset = true; 6056 ctrlr1->is_failed = true; 6057 6058 bdev_nvme_reset(nvme_ctrlr1); 6059 6060 poll_threads(); 6061 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6062 poll_threads(); 6063 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6064 poll_threads(); 6065 6066 CU_ASSERT(ctrlr1->adminq.is_connected == false); 6067 6068 io_path = bdev_nvme_find_io_path(nbdev_ch); 6069 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6070 6071 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6072 6073 /* After a second, ctrlr1 is recovered. However, automatic failback is disabled. 6074 * Hence, io_path to ctrlr2 should still be used. 6075 */ 6076 ctrlr1->fail_reset = false; 6077 6078 spdk_delay_us(SPDK_SEC_TO_USEC); 6079 poll_threads(); 6080 6081 CU_ASSERT(ctrlr1->adminq.is_connected == true); 6082 6083 io_path = bdev_nvme_find_io_path(nbdev_ch); 6084 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6085 6086 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2); 6087 6088 /* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should 6089 * be used again. 6090 */ 6091 6092 cdata = spdk_nvme_ctrlr_get_data(ctrlr1); 6093 done = false; 6094 6095 bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done); 6096 6097 poll_threads(); 6098 CU_ASSERT(done == true); 6099 6100 io_path = bdev_nvme_find_io_path(nbdev_ch); 6101 SPDK_CU_ASSERT_FATAL(io_path != NULL); 6102 6103 CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1); 6104 6105 spdk_put_io_channel(ch); 6106 6107 poll_threads(); 6108 6109 rc = bdev_nvme_delete("nvme0", &g_any_path); 6110 CU_ASSERT(rc == 0); 6111 6112 poll_threads(); 6113 spdk_delay_us(1000); 6114 poll_threads(); 6115 6116 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6117 6118 g_opts.disable_auto_failback = false; 6119 } 6120 6121 static void 6122 ut_set_multipath_policy_done(void *cb_arg, int rc) 6123 { 6124 int *done = cb_arg; 6125 6126 SPDK_CU_ASSERT_FATAL(done != NULL); 6127 *done = rc; 6128 } 6129 6130 static void 6131 test_set_multipath_policy(void) 6132 { 6133 struct nvme_path_id path1 = {}, path2 = {}; 6134 struct nvme_ctrlr_opts opts = {}; 6135 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6136 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6137 const int STRING_SIZE = 32; 6138 const char *attached_names[STRING_SIZE]; 6139 struct nvme_bdev *bdev; 6140 struct spdk_io_channel *ch; 6141 struct nvme_bdev_channel *nbdev_ch; 6142 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6143 int done; 6144 int rc; 6145 6146 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6147 ut_init_trid(&path1.trid); 6148 ut_init_trid2(&path2.trid); 6149 g_ut_attach_ctrlr_status = 0; 6150 g_ut_attach_bdev_count = 1; 6151 6152 g_opts.disable_auto_failback = true; 6153 6154 opts.ctrlr_loss_timeout_sec = -1; 6155 opts.reconnect_delay_sec = 1; 6156 6157 set_thread(0); 6158 6159 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6160 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6161 6162 ctrlr1->ns[0].uuid = &uuid1; 6163 6164 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6165 attach_ctrlr_done, NULL, NULL, &opts, true); 6166 CU_ASSERT(rc == 0); 6167 6168 spdk_delay_us(1000); 6169 poll_threads(); 6170 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6171 poll_threads(); 6172 6173 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6174 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6175 6176 ctrlr2->ns[0].uuid = &uuid1; 6177 6178 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6179 attach_ctrlr_done, NULL, NULL, &opts, true); 6180 CU_ASSERT(rc == 0); 6181 6182 spdk_delay_us(1000); 6183 poll_threads(); 6184 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6185 poll_threads(); 6186 6187 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6188 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6189 6190 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6191 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6192 6193 /* If multipath policy is updated before getting any I/O channel, 6194 * an new I/O channel should have the update. 6195 */ 6196 done = -1; 6197 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6198 BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH, UINT32_MAX, 6199 ut_set_multipath_policy_done, &done); 6200 poll_threads(); 6201 CU_ASSERT(done == 0); 6202 6203 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6204 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6205 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6206 6207 ch = spdk_get_io_channel(bdev); 6208 SPDK_CU_ASSERT_FATAL(ch != NULL); 6209 nbdev_ch = spdk_io_channel_get_ctx(ch); 6210 6211 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6212 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_QUEUE_DEPTH); 6213 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6214 6215 /* If multipath policy is updated while a I/O channel is active, 6216 * the update should be applied to the I/O channel immediately. 6217 */ 6218 done = -1; 6219 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE, 6220 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, UINT32_MAX, 6221 ut_set_multipath_policy_done, &done); 6222 poll_threads(); 6223 CU_ASSERT(done == 0); 6224 6225 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6226 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE); 6227 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6228 CU_ASSERT(nbdev_ch->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6229 CU_ASSERT(bdev->rr_min_io == UINT32_MAX); 6230 CU_ASSERT(nbdev_ch->rr_min_io == UINT32_MAX); 6231 6232 spdk_put_io_channel(ch); 6233 6234 poll_threads(); 6235 6236 rc = bdev_nvme_delete("nvme0", &g_any_path); 6237 CU_ASSERT(rc == 0); 6238 6239 poll_threads(); 6240 spdk_delay_us(1000); 6241 poll_threads(); 6242 6243 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 6244 } 6245 6246 static void 6247 test_uuid_generation(void) 6248 { 6249 uint32_t nsid1 = 1, nsid2 = 2; 6250 char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02"; 6251 char sn3[21] = " "; 6252 char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'}; 6253 struct spdk_uuid uuid1, uuid2; 6254 6255 /* Test case 1: 6256 * Serial numbers are the same, nsids are different. 6257 * Compare two generated UUID - they should be different. */ 6258 uuid1 = nvme_generate_uuid(sn1, nsid1); 6259 uuid2 = nvme_generate_uuid(sn1, nsid2); 6260 6261 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6262 6263 /* Test case 2: 6264 * Serial numbers differ only by one character, nsids are the same. 6265 * Compare two generated UUID - they should be different. */ 6266 uuid1 = nvme_generate_uuid(sn1, nsid1); 6267 uuid2 = nvme_generate_uuid(sn2, nsid1); 6268 6269 CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0); 6270 6271 /* Test case 3: 6272 * Serial number comprises only of space characters. 6273 * Validate the generated UUID. */ 6274 uuid1 = nvme_generate_uuid(sn3, nsid1); 6275 CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0); 6276 } 6277 6278 static void 6279 test_retry_io_to_same_path(void) 6280 { 6281 struct nvme_path_id path1 = {}, path2 = {}; 6282 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 6283 struct nvme_bdev_ctrlr *nbdev_ctrlr; 6284 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 6285 const int STRING_SIZE = 32; 6286 const char *attached_names[STRING_SIZE]; 6287 struct nvme_bdev *bdev; 6288 struct spdk_bdev_io *bdev_io; 6289 struct nvme_bdev_io *bio; 6290 struct spdk_io_channel *ch; 6291 struct nvme_bdev_channel *nbdev_ch; 6292 struct nvme_io_path *io_path1, *io_path2; 6293 struct ut_nvme_req *req; 6294 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 6295 int done; 6296 int rc; 6297 6298 g_opts.nvme_ioq_poll_period_us = 1; 6299 6300 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 6301 ut_init_trid(&path1.trid); 6302 ut_init_trid2(&path2.trid); 6303 g_ut_attach_ctrlr_status = 0; 6304 g_ut_attach_bdev_count = 1; 6305 6306 set_thread(0); 6307 6308 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 6309 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 6310 6311 ctrlr1->ns[0].uuid = &uuid1; 6312 6313 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 6314 attach_ctrlr_done, NULL, NULL, NULL, true); 6315 CU_ASSERT(rc == 0); 6316 6317 spdk_delay_us(1000); 6318 poll_threads(); 6319 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6320 poll_threads(); 6321 6322 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 6323 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 6324 6325 ctrlr2->ns[0].uuid = &uuid1; 6326 6327 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 6328 attach_ctrlr_done, NULL, NULL, NULL, true); 6329 CU_ASSERT(rc == 0); 6330 6331 spdk_delay_us(1000); 6332 poll_threads(); 6333 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 6334 poll_threads(); 6335 6336 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 6337 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 6338 6339 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 6340 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 6341 6342 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 6343 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 6344 6345 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 6346 SPDK_CU_ASSERT_FATAL(bdev != NULL); 6347 6348 done = -1; 6349 bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE, 6350 BDEV_NVME_MP_SELECTOR_ROUND_ROBIN, 1, ut_set_multipath_policy_done, &done); 6351 poll_threads(); 6352 CU_ASSERT(done == 0); 6353 6354 CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6355 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6356 CU_ASSERT(bdev->rr_min_io == 1); 6357 6358 ch = spdk_get_io_channel(bdev); 6359 SPDK_CU_ASSERT_FATAL(ch != NULL); 6360 nbdev_ch = spdk_io_channel_get_ctx(ch); 6361 6362 CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE); 6363 CU_ASSERT(bdev->mp_selector == BDEV_NVME_MP_SELECTOR_ROUND_ROBIN); 6364 CU_ASSERT(nbdev_ch->rr_min_io == 1); 6365 6366 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 6367 ut_bdev_io_set_buf(bdev_io); 6368 6369 bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 6370 6371 io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1); 6372 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 6373 6374 io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2); 6375 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 6376 6377 /* The 1st I/O should be submitted to io_path1. */ 6378 bdev_io->internal.in_submit_request = true; 6379 6380 bdev_nvme_submit_request(ch, bdev_io); 6381 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6382 CU_ASSERT(bio->io_path == io_path1); 6383 CU_ASSERT(io_path1->qpair->qpair->num_outstanding_reqs == 1); 6384 6385 spdk_delay_us(1); 6386 6387 poll_threads(); 6388 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6389 CU_ASSERT(bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS); 6390 6391 /* The 2nd I/O should be submitted to io_path2 because the path selection 6392 * policy is round-robin. 6393 */ 6394 bdev_io->internal.in_submit_request = true; 6395 6396 bdev_nvme_submit_request(ch, bdev_io); 6397 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6398 CU_ASSERT(bio->io_path == io_path2); 6399 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6400 6401 req = ut_get_outstanding_nvme_request(io_path2->qpair->qpair, bio); 6402 SPDK_CU_ASSERT_FATAL(req != NULL); 6403 6404 /* Set retry count to non-zero. */ 6405 g_opts.bdev_retry_count = 1; 6406 6407 /* Inject an I/O error. */ 6408 req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY; 6409 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 6410 6411 /* The 2nd I/O should be queued to nbdev_ch. */ 6412 spdk_delay_us(1); 6413 poll_thread_times(0, 1); 6414 6415 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6416 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6417 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 6418 6419 /* The 2nd I/O should keep caching io_path2. */ 6420 CU_ASSERT(bio->io_path == io_path2); 6421 6422 /* The 2nd I/O should be submitted to io_path2 again. */ 6423 poll_thread_times(0, 1); 6424 6425 CU_ASSERT(bdev_io->internal.in_submit_request == true); 6426 CU_ASSERT(bio->io_path == io_path2); 6427 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 1); 6428 6429 spdk_delay_us(1); 6430 poll_threads(); 6431 6432 CU_ASSERT(io_path2->qpair->qpair->num_outstanding_reqs == 0); 6433 CU_ASSERT(bdev_io->internal.in_submit_request == false); 6434 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 6435 6436 free(bdev_io); 6437 6438 spdk_put_io_channel(ch); 6439 6440 poll_threads(); 6441 spdk_delay_us(1); 6442 poll_threads(); 6443 6444 rc = bdev_nvme_delete("nvme0", &g_any_path); 6445 CU_ASSERT(rc == 0); 6446 6447 poll_threads(); 6448 spdk_delay_us(1000); 6449 poll_threads(); 6450 6451 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 6452 6453 g_opts.nvme_ioq_poll_period_us = 0; 6454 g_opts.bdev_retry_count = 0; 6455 } 6456 6457 int 6458 main(int argc, const char **argv) 6459 { 6460 CU_pSuite suite = NULL; 6461 unsigned int num_failures; 6462 6463 CU_set_error_action(CUEA_ABORT); 6464 CU_initialize_registry(); 6465 6466 suite = CU_add_suite("nvme", NULL, NULL); 6467 6468 CU_ADD_TEST(suite, test_create_ctrlr); 6469 CU_ADD_TEST(suite, test_reset_ctrlr); 6470 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 6471 CU_ADD_TEST(suite, test_failover_ctrlr); 6472 CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid); 6473 CU_ADD_TEST(suite, test_pending_reset); 6474 CU_ADD_TEST(suite, test_attach_ctrlr); 6475 CU_ADD_TEST(suite, test_aer_cb); 6476 CU_ADD_TEST(suite, test_submit_nvme_cmd); 6477 CU_ADD_TEST(suite, test_add_remove_trid); 6478 CU_ADD_TEST(suite, test_abort); 6479 CU_ADD_TEST(suite, test_get_io_qpair); 6480 CU_ADD_TEST(suite, test_bdev_unregister); 6481 CU_ADD_TEST(suite, test_compare_ns); 6482 CU_ADD_TEST(suite, test_init_ana_log_page); 6483 CU_ADD_TEST(suite, test_get_memory_domains); 6484 CU_ADD_TEST(suite, test_reconnect_qpair); 6485 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 6486 CU_ADD_TEST(suite, test_add_multi_ns_to_bdev); 6487 CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch); 6488 CU_ADD_TEST(suite, test_admin_path); 6489 CU_ADD_TEST(suite, test_reset_bdev_ctrlr); 6490 CU_ADD_TEST(suite, test_find_io_path); 6491 CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating); 6492 CU_ADD_TEST(suite, test_retry_io_for_io_path_error); 6493 CU_ADD_TEST(suite, test_retry_io_count); 6494 CU_ADD_TEST(suite, test_concurrent_read_ana_log_page); 6495 CU_ADD_TEST(suite, test_retry_io_for_ana_error); 6496 CU_ADD_TEST(suite, test_check_io_error_resiliency_params); 6497 CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting); 6498 CU_ADD_TEST(suite, test_reconnect_ctrlr); 6499 CU_ADD_TEST(suite, test_retry_failover_ctrlr); 6500 CU_ADD_TEST(suite, test_fail_path); 6501 CU_ADD_TEST(suite, test_nvme_ns_cmp); 6502 CU_ADD_TEST(suite, test_ana_transition); 6503 CU_ADD_TEST(suite, test_set_preferred_path); 6504 CU_ADD_TEST(suite, test_find_next_io_path); 6505 CU_ADD_TEST(suite, test_find_io_path_min_qd); 6506 CU_ADD_TEST(suite, test_disable_auto_failback); 6507 CU_ADD_TEST(suite, test_set_multipath_policy); 6508 CU_ADD_TEST(suite, test_uuid_generation); 6509 CU_ADD_TEST(suite, test_retry_io_to_same_path); 6510 6511 CU_basic_set_mode(CU_BRM_VERBOSE); 6512 6513 allocate_threads(3); 6514 set_thread(0); 6515 bdev_nvme_library_init(); 6516 init_accel(); 6517 6518 CU_basic_run_tests(); 6519 6520 set_thread(0); 6521 bdev_nvme_library_fini(); 6522 fini_accel(); 6523 free_threads(); 6524 6525 num_failures = CU_get_number_of_failures(); 6526 CU_cleanup_registry(); 6527 6528 return num_failures; 6529 } 6530