/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2021 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk/bdev_module.h"

#include "common/lib/ut_multithread.c"

#include "bdev/nvme/bdev_nvme.c"

#include "unit/lib/json_mock.c"

static void *g_accel_p = (void *)0xdeadbeaf;

DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *,
	    (const struct spdk_nvme_transport_id *trid, void *cb_ctx,
	     spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb,
	     spdk_nvme_remove_cb remove_cb), NULL);

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype),
	    NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_transport_id *trid), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_remove_cb remove_cb, void *remove_ctx));

DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_nvme_ctrlr_get_discovery_log_page, int,
	    (struct spdk_nvme_ctrlr *ctrlr, spdk_nvme_discovery_cb cb_fn, void *cb_arg), 0);

DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains, int);
int
spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr,
				   struct spdk_memory_domain **domains, int array_size)
{
	int i, min_array_size;

	if (ut_spdk_nvme_ctrlr_get_memory_domains > 0 && domains && array_size > 0) {
		min_array_size = spdk_min(ut_spdk_nvme_ctrlr_get_memory_domains, array_size);
		for (i = 0; i < min_array_size; i++) {
			domains[i] = (struct spdk_memory_domain *)0xf1f2f3f4f5;
		}
	}
	HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domains);

	return 0;
}

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

void
spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_io_qpair_opts *opts, size_t opts_size)
{
	/* Avoid warning that opts is used uninitialised */
	memset(opts, 0, opts_size);
}

DEFINE_STUB(spdk_nvme_ctrlr_get_opts, const struct spdk_nvme_ctrlr_opts *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_fabrics, bool, (struct spdk_nvme_ctrlr *ctrlr), true);
DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ctrlr_name, int, (struct spdk_nvme_ctrlr *ctrlr, char *name,
		size_t *size), 0);

DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_bdev_module_fini_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr));

struct ut_nvme_req {
	uint16_t opc;
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	struct spdk_nvme_cpl cpl;
	TAILQ_ENTRY(ut_nvme_req) tailq;
};

struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t id;
	bool is_active;
	struct spdk_uuid *uuid;
	enum spdk_nvme_ana_state ana_state;
	enum spdk_nvme_csi csi;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr *ctrlr;
	uint8_t failure_reason;
	bool is_connected;
	bool in_completion_context;
	bool delete_after_completion_context;
	TAILQ_HEAD(, ut_nvme_req) outstanding_reqs;
	uint32_t num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq;
	struct spdk_nvme_poll_group *poll_group;
	void *poll_group_tailq_head;
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};

struct spdk_nvme_ctrlr {
	uint32_t num_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_qpair adminq;
	struct spdk_nvme_ctrlr_data cdata;
	bool attached;
	bool is_failed;
	bool fail_reset;
	bool is_removed;
	struct spdk_nvme_transport_id trid;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
	struct spdk_nvme_ctrlr_opts opts;
};

struct spdk_nvme_poll_group {
	void *ctx;
	struct spdk_nvme_accel_fn_table accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair) connected_qpairs;
	TAILQ_HEAD(, spdk_nvme_qpair) disconnected_qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;
	void *cb_ctx;
	spdk_nvme_attach_cb attach_cb;
	struct spdk_nvme_ctrlr *init_ctrlr;
};

uint32_t
spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	uint32_t nsid;

	for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

uint32_t
spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) {
		if (ctrlr->ns[nsid - 1].is_active) {
			return nsid;
		}
	}

	return 0;
}

static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;
static struct spdk_bdev *g_ut_registered_bdev;
static uint16_t g_ut_cntlid;
static struct nvme_path_id g_any_path = {};

static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

static int
cmp_int(int a, int b)
{
	return a - b;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns,
		bool ana_reporting, bool multipath)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);
	ctrlr->adminq.is_connected = true;

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
			ctrlr->nsdata[i].nsze = 1024;
			ctrlr->nsdata[i].nmic.can_share = multipath;
		}

		ctrlr->cdata.nn = num_ns;
		ctrlr->cdata.mnan = num_ns;
		ctrlr->cdata.nanagrpid = num_ns;
	}

	ctrlr->cdata.cntlid = ++g_ut_cntlid;
	ctrlr->cdata.cmic.multi_ctrlr = multipath;
	ctrlr->cdata.cmic.ana_reporting = ana_reporting;
	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq);
	free(ctrlr->nsdata);
	free(ctrlr->ns);
	free(ctrlr);
}

static int
ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		       uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req;

	req = calloc(1, sizeof(*req));
	if (req == NULL) {
		return -ENOMEM;
	}

	req->opc = opc;
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;

	req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq);
	qpair->num_outstanding_reqs++;

	return 0;
}

static struct ut_nvme_req *
ut_get_outstanding_nvme_request(struct spdk_nvme_qpair *qpair, void *cb_arg)
{
	struct ut_nvme_req *req;

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cb_arg) {
			break;
		}
	}

	return req;
}

static struct spdk_bdev_io *
ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev,
		 struct spdk_io_channel *ch)
{
	struct spdk_bdev_io *bdev_io;

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->type = type;
	bdev_io->bdev = &nbdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	return bdev_io;
}

static void
ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io)
{
	bdev_io->u.bdev.iovs = &bdev_io->iov;
	bdev_io->u.bdev.iovcnt = 1;

	bdev_io->iov.iov_base = (void *)0xFEEDBEEF;
	bdev_io->iov.iov_len = 4096;
}

static void
nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx)
{
	if (ctrlr->is_failed) {
		free(ctrlr);
		return;
	}

	spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts));
	if (probe_ctx->cb_ctx) {
		ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx;
	}

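	/* Attach succeeded. Move the ctrlr to the attached list and notify the
	 * caller, mimicking what a successful probe would do.
	 */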
	TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq);

	if (probe_ctx->attach_cb) {
		probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts);
	}
}

int
spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx)
{
	struct spdk_nvme_ctrlr *ctrlr, *tmp;

	TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) {
			continue;
		}
		TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq);
		nvme_ctrlr_poll_internal(ctrlr, probe_ctx);
	}

	free(probe_ctx);

	return 0;
}

struct spdk_nvme_probe_ctx *
spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid,
			const struct spdk_nvme_ctrlr_opts *opts,
			spdk_nvme_attach_cb attach_cb)
{
	struct spdk_nvme_probe_ctx *probe_ctx;

	if (trid == NULL) {
		return NULL;
	}

	probe_ctx = calloc(1, sizeof(*probe_ctx));
	if (probe_ctx == NULL) {
		return NULL;
	}

	probe_ctx->trid = *trid;
	probe_ctx->cb_ctx = (void *)opts;
	probe_ctx->attach_cb = attach_cb;

	return probe_ctx;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->attached) {
		ut_detach_ctrlr(ctrlr);
	}

	return 0;
}

int
spdk_nvme_detach_async(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_detach_ctx **ctx)
{
	SPDK_CU_ASSERT_FATAL(ctx != NULL);
	*(struct spdk_nvme_ctrlr **)ctx = ctrlr;

	return 0;
}

int
spdk_nvme_detach_poll_async(struct spdk_nvme_detach_ctx *ctx)
{
	return spdk_nvme_detach((struct spdk_nvme_ctrlr *)ctx);
}

void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, opts_size);

	snprintf(opts->hostnqn, sizeof(opts->hostnqn),
		 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003");
}

const struct spdk_nvme_ctrlr_data *
spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr)
{
	return &ctrlr->cdata;
}

uint32_t
spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->num_ns;
}

struct spdk_nvme_ns *
spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return NULL;
	}

	return &ctrlr->ns[nsid - 1];
}

bool
spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid)
{
	if (nsid < 1 || nsid > ctrlr->num_ns) {
		return false;
	}

	return ctrlr->ns[nsid - 1].is_active;
}

union spdk_nvme_csts_register
spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_csts_register csts;

	csts.raw = 0;

	return csts;
}

union spdk_nvme_vs_register
spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
{
	union spdk_nvme_vs_register vs;

	vs.raw = 0;

	return vs;
}

struct spdk_nvme_qpair *
spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
			       const struct spdk_nvme_io_qpair_opts *user_opts,
			       size_t opts_size)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	if (qpair == NULL) {
		return NULL;
	}

	qpair->ctrlr = ctrlr;
	TAILQ_INIT(&qpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq);

	return qpair;
}
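
/* Poll group helpers. A qpair lives on exactly one of its group's
 * connected/disconnected lists, and poll_group_tailq_head records which
 * list it is currently on.
 */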
static void
nvme_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	qpair->poll_group_tailq_head = &group->connected_qpairs;
	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->connected_qpairs, qpair, poll_group_tailq);
}

static void
nvme_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_poll_group *group = qpair->poll_group;

	CU_ASSERT(qpair->poll_group_tailq_head == &group->connected_qpairs);

	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_REMOVE(&group->connected_qpairs, qpair, poll_group_tailq);
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);
}

int
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;
	qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_NONE;

	if (qpair->poll_group) {
		nvme_poll_group_connect_qpair(qpair);
	}

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	if (!qpair->is_connected) {
		return;
	}

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		nvme_poll_group_disconnect_qpair(qpair);
	}
}

int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	if (qpair->in_completion_context) {
		qpair->delete_after_completion_context = true;
		return 0;
	}

	spdk_nvme_ctrlr_disconnect_io_qpair(qpair);

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

int
spdk_nvme_ctrlr_reconnect_poll_async(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		ctrlr->is_failed = true;
		return -EIO;
	}

	ctrlr->adminq.is_connected = true;
	return 0;
}

void
spdk_nvme_ctrlr_reconnect_async(struct spdk_nvme_ctrlr *ctrlr)
{
}

int
spdk_nvme_ctrlr_disconnect(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->is_removed) {
		return -ENXIO;
	}

	ctrlr->adminq.is_connected = false;
	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

bool
spdk_nvme_ctrlr_is_failed(struct spdk_nvme_ctrlr *ctrlr)
{
	return ctrlr->is_failed;
}

spdk_nvme_qp_failure_reason
spdk_nvme_ctrlr_get_admin_qp_failure_reason(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_get_failure_reason(&ctrlr->adminq);
}

#define UT_ANA_DESC_SIZE	(sizeof(struct spdk_nvme_ana_group_descriptor) +	\
				 sizeof(uint32_t))
static void
ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length)
{
	struct spdk_nvme_ana_page ana_hdr;
	char _ana_desc[UT_ANA_DESC_SIZE];
	struct spdk_nvme_ana_group_descriptor *ana_desc;
	struct spdk_nvme_ns *ns;
	uint32_t i;

	memset(&ana_hdr, 0, sizeof(ana_hdr));
	ana_hdr.num_ana_group_desc = ctrlr->num_ns;
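
	/* Copy the fixed-size header first, then one descriptor per active
	 * namespace. Each descriptor in this UT covers exactly one NSID.
	 */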
	SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length);
	memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr));

	buf += sizeof(ana_hdr);
	length -= sizeof(ana_hdr);

	ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc;

	for (i = 0; i < ctrlr->num_ns; i++) {
		ns = &ctrlr->ns[i];

		if (!ns->is_active) {
			continue;
		}

		memset(ana_desc, 0, UT_ANA_DESC_SIZE);

		ana_desc->ana_group_id = ns->id;
		ana_desc->num_of_nsid = 1;
		ana_desc->ana_state = ns->ana_state;
		ana_desc->nsid[0] = ns->id;

		SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length);
		memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE);

		buf += UT_ANA_DESC_SIZE;
		length -= UT_ANA_DESC_SIZE;
	}
}

int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr,
				 uint8_t log_page, uint32_t nsid,
				 void *payload, uint32_t payload_size,
				 uint64_t offset,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) {
		SPDK_CU_ASSERT_FATAL(offset == 0);
		ut_create_ana_log_page(ctrlr, payload, payload_size);
	}

	return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE,
				      cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return ns->uuid;
}
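
/* The namespace I/O command mocks below do not touch data buffers; each one
 * simply queues a ut_nvme_req with the matching NVMe opcode on the given
 * qpair via ut_submit_nvme_request().
 */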
enum spdk_nvme_csi
spdk_nvme_ns_get_csi(const struct spdk_nvme_ns *ns)
{
	return ns->csi;
}

int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

static bool g_ut_readv_ext_called;
int
spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			   spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			   spdk_nvme_req_next_sge_cb next_sge_fn,
			   struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_readv_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

static bool g_ut_writev_ext_called;
int
spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			    uint64_t lba, uint32_t lba_count,
			    spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			    spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			    spdk_nvme_req_next_sge_cb next_sge_fn,
			    struct spdk_nvme_ns_cmd_ext_io_opts *opts)
{
	g_ut_writev_ext_called = true;
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			      uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_copy(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		      const struct spdk_nvme_scc_source_range *ranges,
		      uint16_t num_ranges, uint64_t dest_lba,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COPY, cb_fn, cb_arg);
}

struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->connected_qpairs);
	TAILQ_INIT(&group->disconnected_qpairs);

	return group;
}

int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->connected_qpairs) ||
	    !TAILQ_EMPTY(&group->disconnected_qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

spdk_nvme_qp_failure_reason
spdk_nvme_qpair_get_failure_reason(struct spdk_nvme_qpair *qpair)
{
	return qpair->failure_reason;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	if (!qpair->is_connected) {
		return -ENXIO;
	}

	qpair->in_completion_context = true;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	qpair->in_completion_context = false;
	if (qpair->delete_after_completion_context) {
		spdk_nvme_ctrlr_free_io_qpair(qpair);
	}

	return num_completions;
}

int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->disconnected_qpairs, poll_group_tailq, tmp_qpair) {
		disconnected_qpair_cb(qpair, group->ctx);
	}

	TAILQ_FOREACH_SAFE(qpair, &group->connected_qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->failure_reason != SPDK_NVME_QPAIR_FAILURE_NONE) {
			spdk_nvme_ctrlr_disconnect_io_qpair(qpair);
			/* Bump the number of completions so this counts as "busy" */
			num_completions++;
			continue;
		}

		local_completions = spdk_nvme_qpair_process_completions(qpair,
				    completions_per_qpair);
		if (local_completions < 0 && error_reason == 0) {
			error_reason = local_completions;
		} else {
			num_completions += local_completions;
			assert(num_completions >= 0);
		}
	}

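	/* Report the first error seen, if any; otherwise report the total
	 * number of completions across all connected qpairs.
	 */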
	return error_reason ? error_reason : num_completions;
}

int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	qpair->poll_group_tailq_head = &group->disconnected_qpairs;
	TAILQ_INSERT_TAIL(&group->disconnected_qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	if (qpair->poll_group == NULL) {
		return -ENOENT;
	}

	CU_ASSERT(qpair->poll_group_tailq_head == &group->disconnected_qpairs);

	TAILQ_REMOVE(&group->disconnected_qpairs, qpair, poll_group_tailq);

	qpair->poll_group = NULL;
	qpair->poll_group_tailq_head = NULL;

	return 0;
}

int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	g_ut_registered_bdev = bdev;

	return g_ut_register_bdev_status;
}

void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);

	if (bdev == g_ut_registered_bdev) {
		g_ut_registered_bdev = NULL;
	}

	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_open_ext(const char *bdev_name, bool write,
		   spdk_bdev_event_cb_t event_cb, void *event_ctx,
		   struct spdk_bdev_desc **desc)
{
	if (g_ut_registered_bdev == NULL ||
	    strcmp(g_ut_registered_bdev->name, bdev_name) != 0) {
		return -ENODEV;
	}

	*desc = (struct spdk_bdev_desc *)g_ut_registered_bdev;

	return 0;
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return (struct spdk_bdev *)desc;
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}
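
/* Test cases start here. Each test builds one or more mocked ctrlrs, drives
 * bdev_nvme through its public entry points, and steps the asynchronous
 * paths deterministically with poll_threads() and spdk_delay_us().
 */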
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
ut_check_hotplug_on_reset(void *cb_arg, bool success)
{
	bool *detect_remove = cb_arg;

	CU_ASSERT(success == false);
	SPDK_CU_ASSERT_FATAL(detect_remove != NULL);

	*detect_remove = true;
}

static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	bool detect_remove;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair != NULL);

	poll_thread_times(0, 3);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);

	poll_thread_times(0, 1);
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_thread_times(1, 1);
	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr.adminq.is_connected == false);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr.adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: ctrlr is already removed. */
	ctrlr.is_removed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	detect_remove = false;
	nvme_ctrlr->reset_cb_fn = ut_check_hotplug_on_reset;
	nvme_ctrlr->reset_cb_arg = &detect_remove;

	poll_threads();

	CU_ASSERT(nvme_ctrlr->reset_cb_fn == NULL);
	CU_ASSERT(nvme_ctrlr->reset_cb_arg == NULL);
	CU_ASSERT(detect_remove == true);

	ctrlr.is_removed = false;

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	/* Reset completed but ctrlr is not destructed yet. */
	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);
	CU_ASSERT(nvme_ctrlr->destruct == true);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* New reset request is rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == -ENXIO);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are two channels and destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test the one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -ENXIO);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress. */
	nvme_ctrlr->destruct = false;
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 3: reset completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test the two trids case. */
	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(curr_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 4: reset is in progress. */
	nvme_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == -EBUSY);

	/* Case 5: failover completes successfully. */
	nvme_ctrlr->resetting = false;

	rc = bdev_nvme_failover(nvme_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_ctrlr->resetting == true);

	next_trid = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(next_trid == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

/* We had a bug when running test/nvmf/host/multipath.sh. The bug was the following.
 *
 * An nvme_ctrlr had trid1 and trid2 first. trid1 was active. A connection to trid1 was
 * disconnected and resetting the ctrlr failed repeatedly before starting failover from
 * trid1 to trid2. While processing the failed reset, trid3 was added. trid1 should
 * have remained active, i.e., the head of the list, until the failover completed.
 * However, trid3 was inserted at the head of the list by mistake.
 *
 * I/O qpairs have a smaller polling period than the admin qpair. When a connection is
 * broken, an I/O qpair may detect the error earlier than the admin qpair. An I/O qpair
 * error invokes reset ctrlr and an admin qpair error invokes failover ctrlr. Hence
 * reset ctrlr may be executed repeatedly before failover is executed. Hence this bug
 * is real.
 *
 * The following test verifies the fix.
 */
static void
test_race_between_failover_and_add_secondary_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	path_id1 = TAILQ_FIRST(&nvme_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	path_id2 = TAILQ_NEXT(path_id1, link);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);

	ctrlr.fail_reset = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(path_id1->is_failed == true);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	CU_ASSERT(path_id1 == TAILQ_FIRST(&nvme_ctrlr->trids));
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id1->trid, &trid1) == 0);
	CU_ASSERT(path_id2 == TAILQ_NEXT(path_id1, link));
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id2->trid, &trid2) == 0);
	path_id3 = TAILQ_NEXT(path_id2, link);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(spdk_nvme_transport_id_compare(&path_id3->trid, &trid3) == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);
	ctrlr_ch1 = io_path1->qpair->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);
	ctrlr_ch2 = io_path2->qpair->ctrlr_ch;
	SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	bdev_nvme_submit_request(ch2, first_bdev_io);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets));

	set_thread(0);

	bdev_nvme_submit_request(ch1, second_bdev_io);
	CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io);

	ctrlr->fail_reset = true;

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_ctrlr is created. The failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created. */
	ctrlr = ut_attach_ctrlr(&trid, 0, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);
	CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
1999 */ 2000 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2001 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2002 2003 g_ut_attach_bdev_count = 1; 2004 2005 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2006 attach_ctrlr_done, NULL, NULL, NULL, false); 2007 CU_ASSERT(rc == 0); 2008 2009 spdk_delay_us(1000); 2010 poll_threads(); 2011 2012 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2013 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2014 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2015 2016 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 2017 attached_names[0] = NULL; 2018 2019 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2020 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 2021 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 2022 2023 rc = bdev_nvme_delete("nvme0", &g_any_path); 2024 CU_ASSERT(rc == 0); 2025 2026 poll_threads(); 2027 spdk_delay_us(1000); 2028 poll_threads(); 2029 2030 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2031 2032 /* The ctrlr has one namespace, but an nvme_ctrlr with no namespaces is 2033 * created because nvme_bdev creation fails. 2034 */ 2035 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2036 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2037 2038 g_ut_register_bdev_status = -EINVAL; 2039 g_ut_attach_bdev_count = 0; 2040 2041 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2042 attach_ctrlr_done, NULL, NULL, NULL, false); 2043 CU_ASSERT(rc == 0); 2044 2045 spdk_delay_us(1000); 2046 poll_threads(); 2047 2048 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2049 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2050 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 2051 2052 CU_ASSERT(attached_names[0] == NULL); 2053 2054 rc = bdev_nvme_delete("nvme0", &g_any_path); 2055 CU_ASSERT(rc == 0); 2056 2057 poll_threads(); 2058 spdk_delay_us(1000); 2059 poll_threads(); 2060 2061 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2062 2063 g_ut_register_bdev_status = 0; 2064 } 2065 2066 static void 2067 test_aer_cb(void) 2068 { 2069 struct spdk_nvme_transport_id trid = {}; 2070 struct spdk_nvme_ctrlr *ctrlr; 2071 struct nvme_ctrlr *nvme_ctrlr; 2072 struct nvme_bdev *bdev; 2073 const int STRING_SIZE = 32; 2074 const char *attached_names[STRING_SIZE]; 2075 union spdk_nvme_async_event_completion event = {}; 2076 struct spdk_nvme_cpl cpl = {}; 2077 int rc; 2078 2079 set_thread(0); 2080 2081 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2082 ut_init_trid(&trid); 2083 2084 /* Attach a ctrlr whose max number of namespaces is 4 and whose 2nd, 3rd, and 4th 2085 * namespaces are populated.
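 * (The 1st namespace is marked inactive below, before bdev_nvme_create() is
 * called.)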
2086 */ 2087 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 2088 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2089 2090 ctrlr->ns[0].is_active = false; 2091 2092 g_ut_attach_ctrlr_status = 0; 2093 g_ut_attach_bdev_count = 3; 2094 2095 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2096 attach_ctrlr_done, NULL, NULL, NULL, false); 2097 CU_ASSERT(rc == 0); 2098 2099 spdk_delay_us(1000); 2100 poll_threads(); 2101 2102 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2103 poll_threads(); 2104 2105 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2106 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2107 2108 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 2109 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2110 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2111 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2112 2113 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 2114 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2115 CU_ASSERT(bdev->disk.blockcnt == 1024); 2116 2117 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 2118 * change the size of the 4th namespace. 2119 */ 2120 ctrlr->ns[0].is_active = true; 2121 ctrlr->ns[2].is_active = false; 2122 ctrlr->nsdata[3].nsze = 2048; 2123 2124 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2125 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 2126 cpl.cdw0 = event.raw; 2127 2128 aer_cb(nvme_ctrlr, &cpl); 2129 2130 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2131 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2132 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 2133 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2134 CU_ASSERT(bdev->disk.blockcnt == 2048); 2135 2136 /* Change ANA state of active namespaces. */ 2137 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2138 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2139 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2140 2141 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 2142 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 2143 cpl.cdw0 = event.raw; 2144 2145 aer_cb(nvme_ctrlr, &cpl); 2146 2147 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2148 poll_threads(); 2149 2150 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2151 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2152 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2153 2154 rc = bdev_nvme_delete("nvme0", &g_any_path); 2155 CU_ASSERT(rc == 0); 2156 2157 poll_threads(); 2158 spdk_delay_us(1000); 2159 poll_threads(); 2160 2161 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2162 } 2163 2164 static void 2165 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2166 enum spdk_bdev_io_type io_type) 2167 { 2168 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2169 struct nvme_io_path *io_path; 2170 struct spdk_nvme_qpair *qpair; 2171 2172 io_path = bdev_nvme_find_io_path(nbdev_ch); 2173 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2174 qpair = io_path->qpair->qpair; 2175 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2176 2177 bdev_io->type = io_type; 2178 bdev_io->internal.in_submit_request = true; 2179 2180 bdev_nvme_submit_request(ch, bdev_io); 2181 2182 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2183 CU_ASSERT(qpair->num_outstanding_reqs == 1); 2184 CU_ASSERT(io_path->io_outstanding == 1); 2185 2186 
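	/* Polling lets the stubbed qpair complete the outstanding request and run
	 * the bdev I/O completion callback.
	 */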
poll_threads(); 2187 2188 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2189 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2190 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2191 CU_ASSERT(io_path->io_outstanding == 0); 2192 } 2193 2194 static void 2195 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2196 enum spdk_bdev_io_type io_type) 2197 { 2198 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2199 struct nvme_io_path *io_path; 2200 struct spdk_nvme_qpair *qpair; 2201 2202 io_path = bdev_nvme_find_io_path(nbdev_ch); 2203 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2204 qpair = io_path->qpair->qpair; 2205 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2206 2207 bdev_io->type = io_type; 2208 bdev_io->internal.in_submit_request = true; 2209 2210 bdev_nvme_submit_request(ch, bdev_io); 2211 2212 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2213 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2214 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2215 CU_ASSERT(io_path->io_outstanding == 0); 2216 } 2217 2218 static void 2219 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 2220 { 2221 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 2222 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 2223 struct ut_nvme_req *req; 2224 struct nvme_io_path *io_path; 2225 struct spdk_nvme_qpair *qpair; 2226 2227 io_path = bdev_nvme_find_io_path(nbdev_ch); 2228 SPDK_CU_ASSERT_FATAL(io_path != NULL); 2229 qpair = io_path->qpair->qpair; 2230 SPDK_CU_ASSERT_FATAL(qpair != NULL); 2231 2232 /* Only compare and write now. */ 2233 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 2234 bdev_io->internal.in_submit_request = true; 2235 2236 bdev_nvme_submit_request(ch, bdev_io); 2237 2238 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2239 CU_ASSERT(qpair->num_outstanding_reqs == 2); 2240 CU_ASSERT(bio->first_fused_submitted == true); 2241 CU_ASSERT(io_path->io_outstanding == 1); 2242 2243 /* First outstanding request is compare operation. 
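 * Storing the opcode in cdw0 lets the fused completion callback tell the
 * compare completion apart from the write completion.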
*/ 2244 req = TAILQ_FIRST(&qpair->outstanding_reqs); 2245 SPDK_CU_ASSERT_FATAL(req != NULL); 2246 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 2247 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 2248 2249 poll_threads(); 2250 2251 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2252 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2253 CU_ASSERT(qpair->num_outstanding_reqs == 0); 2254 CU_ASSERT(io_path->io_outstanding == 0); 2255 } 2256 2257 static void 2258 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 2259 struct spdk_nvme_ctrlr *ctrlr) 2260 { 2261 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2262 bdev_io->internal.in_submit_request = true; 2263 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2264 2265 bdev_nvme_submit_request(ch, bdev_io); 2266 2267 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2268 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2269 2270 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2271 poll_thread_times(1, 1); 2272 2273 CU_ASSERT(bdev_io->internal.in_submit_request == true); 2274 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2275 2276 poll_thread_times(0, 1); 2277 2278 CU_ASSERT(bdev_io->internal.in_submit_request == false); 2279 } 2280 2281 static void 2282 test_submit_nvme_cmd(void) 2283 { 2284 struct spdk_nvme_transport_id trid = {}; 2285 struct spdk_nvme_ctrlr *ctrlr; 2286 struct nvme_ctrlr *nvme_ctrlr; 2287 const int STRING_SIZE = 32; 2288 const char *attached_names[STRING_SIZE]; 2289 struct nvme_bdev *bdev; 2290 struct spdk_bdev_io *bdev_io; 2291 struct spdk_io_channel *ch; 2292 struct spdk_bdev_ext_io_opts ext_io_opts = {}; 2293 int rc; 2294 2295 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2296 ut_init_trid(&trid); 2297 2298 set_thread(1); 2299 2300 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2301 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2302 2303 g_ut_attach_ctrlr_status = 0; 2304 g_ut_attach_bdev_count = 1; 2305 2306 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2307 attach_ctrlr_done, NULL, NULL, NULL, false); 2308 CU_ASSERT(rc == 0); 2309 2310 spdk_delay_us(1000); 2311 poll_threads(); 2312 2313 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2314 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2315 2316 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2317 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2318 2319 set_thread(0); 2320 2321 ch = spdk_get_io_channel(bdev); 2322 SPDK_CU_ASSERT_FATAL(ch != NULL); 2323 2324 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2325 2326 bdev_io->u.bdev.iovs = NULL; 2327 2328 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2329 2330 ut_bdev_io_set_buf(bdev_io); 2331 2332 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2333 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2334 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2335 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2336 2337 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2338 2339 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2340 2341 /* Verify that ext NVME API is called if bdev_io ext_opts is set */ 2342 bdev_io->u.bdev.ext_opts = &ext_io_opts; 2343 g_ut_readv_ext_called = false; 2344 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2345 CU_ASSERT(g_ut_readv_ext_called == true); 2346 g_ut_readv_ext_called = false; 2347 2348 g_ut_writev_ext_called = false; 2349 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2350 
CU_ASSERT(g_ut_writev_ext_called == true); 2351 g_ut_writev_ext_called = false; 2352 bdev_io->u.bdev.ext_opts = NULL; 2353 2354 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2355 2356 free(bdev_io); 2357 2358 spdk_put_io_channel(ch); 2359 2360 poll_threads(); 2361 2362 set_thread(1); 2363 2364 rc = bdev_nvme_delete("nvme0", &g_any_path); 2365 CU_ASSERT(rc == 0); 2366 2367 poll_threads(); 2368 spdk_delay_us(1000); 2369 poll_threads(); 2370 2371 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2372 } 2373 2374 static void 2375 test_add_remove_trid(void) 2376 { 2377 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 2378 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2379 struct nvme_ctrlr *nvme_ctrlr = NULL; 2380 const int STRING_SIZE = 32; 2381 const char *attached_names[STRING_SIZE]; 2382 struct nvme_path_id *ctrid; 2383 int rc; 2384 2385 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2386 ut_init_trid(&path1.trid); 2387 ut_init_trid2(&path2.trid); 2388 ut_init_trid3(&path3.trid); 2389 2390 set_thread(0); 2391 2392 g_ut_attach_ctrlr_status = 0; 2393 g_ut_attach_bdev_count = 0; 2394 2395 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2396 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2397 2398 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2399 attach_ctrlr_done, NULL, NULL, NULL, false); 2400 CU_ASSERT(rc == 0); 2401 2402 spdk_delay_us(1000); 2403 poll_threads(); 2404 2405 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2406 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2407 2408 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2409 2410 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2411 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2412 2413 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2414 attach_ctrlr_done, NULL, NULL, NULL, false); 2415 CU_ASSERT(rc == 0); 2416 2417 spdk_delay_us(1000); 2418 poll_threads(); 2419 2420 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2421 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2422 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2423 break; 2424 } 2425 } 2426 CU_ASSERT(ctrid != NULL); 2427 2428 /* trid3 is not in the registered list. */ 2429 rc = bdev_nvme_delete("nvme0", &path3); 2430 CU_ASSERT(rc == -ENXIO); 2431 2432 /* trid2 is not used, and simply removed. */ 2433 rc = bdev_nvme_delete("nvme0", &path2); 2434 CU_ASSERT(rc == 0); 2435 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2436 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2437 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) != 0); 2438 } 2439 2440 ctrlr3 = ut_attach_ctrlr(&path3.trid, 0, false, false); 2441 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2442 2443 rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE, 2444 attach_ctrlr_done, NULL, NULL, NULL, false); 2445 CU_ASSERT(rc == 0); 2446 2447 spdk_delay_us(1000); 2448 poll_threads(); 2449 2450 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2451 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2452 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path3.trid) == 0) { 2453 break; 2454 } 2455 } 2456 CU_ASSERT(ctrid != NULL); 2457 2458 /* path1 is currently used and path3 is an alternative path. 2459 * If we remove path1, path is changed to path3. 
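 * Removing the active path triggers a reset, and the ctrlr reconnects using
 * the next registered trid.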
2460 */ 2461 rc = bdev_nvme_delete("nvme0", &path1); 2462 CU_ASSERT(rc == 0); 2463 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2464 CU_ASSERT(nvme_ctrlr->resetting == true); 2465 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2466 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &path1.trid) != 0); 2467 } 2468 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path3.trid) == 0); 2469 2470 poll_threads(); 2471 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2472 poll_threads(); 2473 2474 CU_ASSERT(nvme_ctrlr->resetting == false); 2475 2476 /* path3 is the current and only path. If we remove path3, the corresponding 2477 * nvme_ctrlr is removed. 2478 */ 2479 rc = bdev_nvme_delete("nvme0", &path3); 2480 CU_ASSERT(rc == 0); 2481 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2482 2483 poll_threads(); 2484 spdk_delay_us(1000); 2485 poll_threads(); 2486 2487 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2488 2489 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, false, false); 2490 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2491 2492 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 2493 attach_ctrlr_done, NULL, NULL, NULL, false); 2494 CU_ASSERT(rc == 0); 2495 2496 spdk_delay_us(1000); 2497 poll_threads(); 2498 2499 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2500 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2501 2502 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2503 2504 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, false, false); 2505 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2506 2507 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 2508 attach_ctrlr_done, NULL, NULL, NULL, false); 2509 CU_ASSERT(rc == 0); 2510 2511 spdk_delay_us(1000); 2512 poll_threads(); 2513 2514 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->active_path_id->trid, &path1.trid) == 0); 2515 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2516 if (spdk_nvme_transport_id_compare(&ctrid->trid, &path2.trid) == 0) { 2517 break; 2518 } 2519 } 2520 CU_ASSERT(ctrid != NULL); 2521 2522 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2523 rc = bdev_nvme_delete("nvme0", &g_any_path); 2524 CU_ASSERT(rc == 0); 2525 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2526 2527 poll_threads(); 2528 spdk_delay_us(1000); 2529 poll_threads(); 2530 2531 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2532 } 2533 2534 static void 2535 test_abort(void) 2536 { 2537 struct spdk_nvme_transport_id trid = {}; 2538 struct nvme_ctrlr_opts opts = {}; 2539 struct spdk_nvme_ctrlr *ctrlr; 2540 struct nvme_ctrlr *nvme_ctrlr; 2541 const int STRING_SIZE = 32; 2542 const char *attached_names[STRING_SIZE]; 2543 struct nvme_bdev *bdev; 2544 struct spdk_bdev_io *write_io, *fuse_io, *admin_io, *abort_io; 2545 struct spdk_io_channel *ch1, *ch2; 2546 struct nvme_bdev_channel *nbdev_ch1; 2547 struct nvme_io_path *io_path1; 2548 struct nvme_qpair *nvme_qpair1; 2549 int rc; 2550 2551 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2552 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2553 * are submitted on thread 1. Both should succeed. 
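 * (That is, abort requests targeting I/O commands are issued on thread 0, and
 * abort requests targeting admin commands are issued on thread 1.)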
2554 */ 2555 2556 ut_init_trid(&trid); 2557 2558 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2559 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2560 2561 g_ut_attach_ctrlr_status = 0; 2562 g_ut_attach_bdev_count = 1; 2563 2564 set_thread(1); 2565 2566 opts.ctrlr_loss_timeout_sec = -1; 2567 opts.reconnect_delay_sec = 1; 2568 2569 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2570 attach_ctrlr_done, NULL, NULL, &opts, false); 2571 CU_ASSERT(rc == 0); 2572 2573 spdk_delay_us(1000); 2574 poll_threads(); 2575 2576 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2577 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2578 2579 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2580 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2581 2582 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2583 ut_bdev_io_set_buf(write_io); 2584 2585 fuse_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE, bdev, NULL); 2586 ut_bdev_io_set_buf(fuse_io); 2587 2588 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2589 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2590 2591 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2592 2593 set_thread(0); 2594 2595 ch1 = spdk_get_io_channel(bdev); 2596 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2597 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2598 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 2599 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 2600 nvme_qpair1 = io_path1->qpair; 2601 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 2602 2603 set_thread(1); 2604 2605 ch2 = spdk_get_io_channel(bdev); 2606 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2607 2608 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2609 fuse_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2610 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2611 2612 /* Aborting the already completed request should fail. */ 2613 write_io->internal.in_submit_request = true; 2614 bdev_nvme_submit_request(ch1, write_io); 2615 poll_threads(); 2616 2617 CU_ASSERT(write_io->internal.in_submit_request == false); 2618 2619 abort_io->u.abort.bio_to_abort = write_io; 2620 abort_io->internal.in_submit_request = true; 2621 2622 bdev_nvme_submit_request(ch1, abort_io); 2623 2624 poll_threads(); 2625 2626 CU_ASSERT(abort_io->internal.in_submit_request == false); 2627 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2628 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2629 2630 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2631 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2632 2633 admin_io->internal.in_submit_request = true; 2634 bdev_nvme_submit_request(ch1, admin_io); 2635 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2636 poll_threads(); 2637 2638 CU_ASSERT(admin_io->internal.in_submit_request == false); 2639 2640 abort_io->u.abort.bio_to_abort = admin_io; 2641 abort_io->internal.in_submit_request = true; 2642 2643 bdev_nvme_submit_request(ch2, abort_io); 2644 2645 poll_threads(); 2646 2647 CU_ASSERT(abort_io->internal.in_submit_request == false); 2648 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2649 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2650 2651 /* Aborting the write request should succeed. 
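 * The abort itself is issued as an admin command, so the admin queue poll
 * period must elapse before it completes.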
*/ 2652 write_io->internal.in_submit_request = true; 2653 bdev_nvme_submit_request(ch1, write_io); 2654 2655 CU_ASSERT(write_io->internal.in_submit_request == true); 2656 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1); 2657 2658 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2659 abort_io->u.abort.bio_to_abort = write_io; 2660 abort_io->internal.in_submit_request = true; 2661 2662 bdev_nvme_submit_request(ch1, abort_io); 2663 2664 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2665 poll_threads(); 2666 2667 CU_ASSERT(abort_io->internal.in_submit_request == false); 2668 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2669 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2670 CU_ASSERT(write_io->internal.in_submit_request == false); 2671 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2672 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2673 2674 /* Aborting the fuse request should succeed. */ 2675 fuse_io->internal.in_submit_request = true; 2676 bdev_nvme_submit_request(ch1, fuse_io); 2677 2678 CU_ASSERT(fuse_io->internal.in_submit_request == true); 2679 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 2); 2680 2681 abort_io->u.abort.bio_to_abort = fuse_io; 2682 abort_io->internal.in_submit_request = true; 2683 2684 bdev_nvme_submit_request(ch1, abort_io); 2685 2686 spdk_delay_us(10000); 2687 poll_threads(); 2688 2689 CU_ASSERT(abort_io->internal.in_submit_request == false); 2690 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2691 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2692 CU_ASSERT(fuse_io->internal.in_submit_request == false); 2693 CU_ASSERT(fuse_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2694 CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0); 2695 2696 /* Aborting the admin request should succeed. */ 2697 admin_io->internal.in_submit_request = true; 2698 bdev_nvme_submit_request(ch1, admin_io); 2699 2700 CU_ASSERT(admin_io->internal.in_submit_request == true); 2701 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2702 2703 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2704 abort_io->u.abort.bio_to_abort = admin_io; 2705 abort_io->internal.in_submit_request = true; 2706 2707 bdev_nvme_submit_request(ch2, abort_io); 2708 2709 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2710 poll_threads(); 2711 2712 CU_ASSERT(abort_io->internal.in_submit_request == false); 2713 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2714 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2715 CU_ASSERT(admin_io->internal.in_submit_request == false); 2716 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2717 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2718 2719 set_thread(0); 2720 2721 /* If qpair is disconnected, it is freed and then reconnected via resetting 2722 * the corresponding nvme_ctrlr. I/O should be queued if it is submitted 2723 * while resetting the nvme_ctrlr. 2724 */ 2725 nvme_qpair1->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 2726 2727 poll_thread_times(0, 3); 2728 2729 CU_ASSERT(nvme_qpair1->qpair == NULL); 2730 CU_ASSERT(nvme_ctrlr->resetting == true); 2731 2732 write_io->internal.in_submit_request = true; 2733 2734 bdev_nvme_submit_request(ch1, write_io); 2735 2736 CU_ASSERT(write_io->internal.in_submit_request == true); 2737 CU_ASSERT(write_io == TAILQ_FIRST(&nbdev_ch1->retry_io_list)); 2738 2739 /* Aborting the queued write request should succeed immediately. 
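 * The target I/O is still sitting in the retry list and has not reached a
 * qpair, so no abort command needs to be sent to the ctrlr.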
*/ 2740 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2741 abort_io->u.abort.bio_to_abort = write_io; 2742 abort_io->internal.in_submit_request = true; 2743 2744 bdev_nvme_submit_request(ch1, abort_io); 2745 2746 CU_ASSERT(abort_io->internal.in_submit_request == false); 2747 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2748 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2749 CU_ASSERT(write_io->internal.in_submit_request == false); 2750 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2751 2752 poll_threads(); 2753 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2754 poll_threads(); 2755 2756 spdk_put_io_channel(ch1); 2757 2758 set_thread(1); 2759 2760 spdk_put_io_channel(ch2); 2761 2762 poll_threads(); 2763 2764 free(write_io); 2765 free(fuse_io); 2766 free(admin_io); 2767 free(abort_io); 2768 2769 set_thread(1); 2770 2771 rc = bdev_nvme_delete("nvme0", &g_any_path); 2772 CU_ASSERT(rc == 0); 2773 2774 poll_threads(); 2775 spdk_delay_us(1000); 2776 poll_threads(); 2777 2778 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2779 } 2780 2781 static void 2782 test_get_io_qpair(void) 2783 { 2784 struct spdk_nvme_transport_id trid = {}; 2785 struct spdk_nvme_ctrlr ctrlr = {}; 2786 struct nvme_ctrlr *nvme_ctrlr = NULL; 2787 struct spdk_io_channel *ch; 2788 struct nvme_ctrlr_channel *ctrlr_ch; 2789 struct spdk_nvme_qpair *qpair; 2790 int rc; 2791 2792 ut_init_trid(&trid); 2793 TAILQ_INIT(&ctrlr.active_io_qpairs); 2794 2795 set_thread(0); 2796 2797 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL); 2798 CU_ASSERT(rc == 0); 2799 2800 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2801 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2802 2803 ch = spdk_get_io_channel(nvme_ctrlr); 2804 SPDK_CU_ASSERT_FATAL(ch != NULL); 2805 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2806 CU_ASSERT(ctrlr_ch->qpair->qpair != NULL); 2807 2808 qpair = bdev_nvme_get_io_qpair(ch); 2809 CU_ASSERT(qpair == ctrlr_ch->qpair->qpair); 2810 2811 spdk_put_io_channel(ch); 2812 2813 rc = bdev_nvme_delete("nvme0", &g_any_path); 2814 CU_ASSERT(rc == 0); 2815 2816 poll_threads(); 2817 spdk_delay_us(1000); 2818 poll_threads(); 2819 2820 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2821 } 2822 2823 /* Test a scenario where the bdev subsystem starts shutting down while NVMe bdevs 2824 * still exist. In this scenario, spdk_bdev_unregister() is called first. Add a 2825 * test case to avoid regression for this scenario. spdk_bdev_unregister() eventually 2826 * calls bdev_nvme_destruct(), so call bdev_nvme_destruct() directly.
2827 */ 2828 static void 2829 test_bdev_unregister(void) 2830 { 2831 struct spdk_nvme_transport_id trid = {}; 2832 struct spdk_nvme_ctrlr *ctrlr; 2833 struct nvme_ctrlr *nvme_ctrlr; 2834 struct nvme_ns *nvme_ns1, *nvme_ns2; 2835 const int STRING_SIZE = 32; 2836 const char *attached_names[STRING_SIZE]; 2837 struct nvme_bdev *bdev1, *bdev2; 2838 int rc; 2839 2840 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2841 ut_init_trid(&trid); 2842 2843 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2844 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2845 2846 g_ut_attach_ctrlr_status = 0; 2847 g_ut_attach_bdev_count = 2; 2848 2849 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2850 attach_ctrlr_done, NULL, NULL, NULL, false); 2851 CU_ASSERT(rc == 0); 2852 2853 spdk_delay_us(1000); 2854 poll_threads(); 2855 2856 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2857 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2858 2859 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2860 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2861 2862 bdev1 = nvme_ns1->bdev; 2863 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2864 2865 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2866 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2867 2868 bdev2 = nvme_ns2->bdev; 2869 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2870 2871 bdev_nvme_destruct(&bdev1->disk); 2872 bdev_nvme_destruct(&bdev2->disk); 2873 2874 poll_threads(); 2875 2876 CU_ASSERT(nvme_ns1->bdev == NULL); 2877 CU_ASSERT(nvme_ns2->bdev == NULL); 2878 2879 nvme_ctrlr->destruct = true; 2880 _nvme_ctrlr_destruct(nvme_ctrlr); 2881 2882 poll_threads(); 2883 spdk_delay_us(1000); 2884 poll_threads(); 2885 2886 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2887 } 2888 2889 static void 2890 test_compare_ns(void) 2891 { 2892 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 2893 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 2894 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 2895 struct spdk_uuid uuid1 = { .u.raw = { 0xAA } }; 2896 struct spdk_uuid uuid2 = { .u.raw = { 0xAB } }; 2897 2898 /* No IDs are defined. */ 2899 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2900 2901 /* Only EUI64 are defined and not matched. */ 2902 nsdata1.eui64 = 0xABCDEF0123456789; 2903 nsdata2.eui64 = 0xBBCDEF0123456789; 2904 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2905 2906 /* Only EUI64 are defined and matched. */ 2907 nsdata2.eui64 = 0xABCDEF0123456789; 2908 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2909 2910 /* Only NGUID are defined and not matched. */ 2911 nsdata1.eui64 = 0x0; 2912 nsdata2.eui64 = 0x0; 2913 nsdata1.nguid[0] = 0x12; 2914 nsdata2.nguid[0] = 0x10; 2915 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2916 2917 /* Only NGUID are defined and matched. */ 2918 nsdata2.nguid[0] = 0x12; 2919 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2920 2921 /* Only UUID are defined and not matched. */ 2922 nsdata1.nguid[0] = 0x0; 2923 nsdata2.nguid[0] = 0x0; 2924 ns1.uuid = &uuid1; 2925 ns2.uuid = &uuid2; 2926 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2927 2928 /* Only one UUID is defined. */ 2929 ns1.uuid = NULL; 2930 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2931 2932 /* Only UUID are defined and matched. */ 2933 ns1.uuid = &uuid2; 2934 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2935 2936 /* All EUI64, NGUID, and UUID are defined and matched. 
*/ 2937 nsdata1.eui64 = 0x123456789ABCDEF; 2938 nsdata2.eui64 = 0x123456789ABCDEF; 2939 nsdata1.nguid[15] = 0x34; 2940 nsdata2.nguid[15] = 0x34; 2941 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2942 2943 /* CSI are not matched. */ 2944 ns1.csi = SPDK_NVME_CSI_ZNS; 2945 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2946 } 2947 2948 static void 2949 test_init_ana_log_page(void) 2950 { 2951 struct spdk_nvme_transport_id trid = {}; 2952 struct spdk_nvme_ctrlr *ctrlr; 2953 struct nvme_ctrlr *nvme_ctrlr; 2954 const int STRING_SIZE = 32; 2955 const char *attached_names[STRING_SIZE]; 2956 int rc; 2957 2958 set_thread(0); 2959 2960 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2961 ut_init_trid(&trid); 2962 2963 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 2964 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2965 2966 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2967 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2968 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2969 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 2970 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2971 2972 g_ut_attach_ctrlr_status = 0; 2973 g_ut_attach_bdev_count = 5; 2974 2975 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 2976 attach_ctrlr_done, NULL, NULL, NULL, false); 2977 CU_ASSERT(rc == 0); 2978 2979 spdk_delay_us(1000); 2980 poll_threads(); 2981 2982 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2983 poll_threads(); 2984 2985 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2986 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2987 2988 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2989 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2990 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2991 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2992 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 2993 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 2994 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2995 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2996 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 2997 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2998 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 2999 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 3000 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 3001 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 3002 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 3003 3004 rc = bdev_nvme_delete("nvme0", &g_any_path); 3005 CU_ASSERT(rc == 0); 3006 3007 poll_threads(); 3008 spdk_delay_us(1000); 3009 poll_threads(); 3010 3011 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3012 } 3013 3014 static void 3015 init_accel(void) 3016 { 3017 spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy, 3018 sizeof(int), "accel_p"); 3019 } 3020 3021 static void 3022 fini_accel(void) 3023 { 3024 spdk_io_device_unregister(g_accel_p, NULL); 3025 } 3026 3027 static void 3028 test_get_memory_domains(void) 3029 { 3030 struct nvme_ctrlr ctrlr_1 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 3031 struct nvme_ctrlr ctrlr_2 = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaaadbeeef }; 3032 struct nvme_ns ns_1 = { .ctrlr = &ctrlr_1 }; 3033 struct nvme_ns ns_2 = { .ctrlr = &ctrlr_2 }; 3034 
struct nvme_bdev nbdev = { .nvme_ns_list = TAILQ_HEAD_INITIALIZER(nbdev.nvme_ns_list) }; 3035 struct spdk_memory_domain *domains[4] = {}; 3036 int rc = 0; 3037 3038 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_1, tailq); 3039 3040 /* nvme controller doesn't have memory domains */ 3041 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 0); 3042 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3043 CU_ASSERT(rc == 0); 3044 CU_ASSERT(domains[0] == NULL); 3045 CU_ASSERT(domains[1] == NULL); 3046 3047 /* nvme controller has a memory domain */ 3048 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 1); 3049 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3050 CU_ASSERT(rc == 1); 3051 CU_ASSERT(domains[0] != NULL); 3052 memset(domains, 0, sizeof(domains)); 3053 3054 /* multipath, 2 controllers report 1 memory domain each */ 3055 TAILQ_INSERT_TAIL(&nbdev.nvme_ns_list, &ns_2, tailq); 3056 3057 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 3058 CU_ASSERT(rc == 2); 3059 CU_ASSERT(domains[0] != NULL); 3060 CU_ASSERT(domains[1] != NULL); 3061 memset(domains, 0, sizeof(domains)); 3062 3063 /* multipath, 2 controllers report 1 memory domain each, NULL domains ptr */ 3064 rc = bdev_nvme_get_memory_domains(&nbdev, NULL, 2); 3065 CU_ASSERT(rc == 2); 3066 3067 /* multipath, 2 controllers report 1 memory domain each, array_size = 0 */ 3068 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 0); 3069 CU_ASSERT(rc == 2); 3070 CU_ASSERT(domains[0] == NULL); 3071 CU_ASSERT(domains[1] == NULL); 3072 3073 /* multipath, 2 controllers report 1 memory domain each, array_size = 1 */ 3074 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 1); 3075 CU_ASSERT(rc == 2); 3076 CU_ASSERT(domains[0] != NULL); 3077 CU_ASSERT(domains[1] == NULL); 3078 memset(domains, 0, sizeof(domains)); 3079 3080 /* multipath, 2 controllers report 2 memory domains each (not possible, just for test) */ 3081 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3082 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 4); 3083 CU_ASSERT(rc == 4); 3084 CU_ASSERT(domains[0] != NULL); 3085 CU_ASSERT(domains[1] != NULL); 3086 CU_ASSERT(domains[2] != NULL); 3087 CU_ASSERT(domains[3] != NULL); 3088 memset(domains, 0, sizeof(domains)); 3089 3090 /* multipath, 2 controllers report 2 memory domains each (not possible, just for test) 3091 * Array size is less than the number of memory domains */ 3092 MOCK_SET(spdk_nvme_ctrlr_get_memory_domains, 2); 3093 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 3); 3094 CU_ASSERT(rc == 4); 3095 CU_ASSERT(domains[0] != NULL); 3096 CU_ASSERT(domains[1] != NULL); 3097 CU_ASSERT(domains[2] != NULL); 3098 CU_ASSERT(domains[3] == NULL); 3099 memset(domains, 0, sizeof(domains)); 3100 3101 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domains); 3102 } 3103 3104 static void 3105 test_reconnect_qpair(void) 3106 { 3107 struct spdk_nvme_transport_id trid = {}; 3108 struct spdk_nvme_ctrlr *ctrlr; 3109 struct nvme_ctrlr *nvme_ctrlr; 3110 const int STRING_SIZE = 32; 3111 const char *attached_names[STRING_SIZE]; 3112 struct nvme_bdev *bdev; 3113 struct spdk_io_channel *ch1, *ch2; 3114 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 3115 struct nvme_io_path *io_path1, *io_path2; 3116 struct nvme_qpair *nvme_qpair1, *nvme_qpair2; 3117 int rc; 3118 3119 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3120 ut_init_trid(&trid); 3121 3122 set_thread(0); 3123 3124 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 3125 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 3126 3127 g_ut_attach_ctrlr_status = 0; 3128 g_ut_attach_bdev_count = 1; 3129 3130
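	/* The stub globals set above drive the deferred attach: a zero status makes
	 * it succeed, and one bdev is expected to be created.
	 */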
rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 3131 attach_ctrlr_done, NULL, NULL, NULL, false); 3132 CU_ASSERT(rc == 0); 3133 3134 spdk_delay_us(1000); 3135 poll_threads(); 3136 3137 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 3138 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 3139 3140 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 3141 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3142 3143 ch1 = spdk_get_io_channel(bdev); 3144 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 3145 3146 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 3147 io_path1 = STAILQ_FIRST(&nbdev_ch1->io_path_list); 3148 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3149 nvme_qpair1 = io_path1->qpair; 3150 SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL); 3151 3152 set_thread(1); 3153 3154 ch2 = spdk_get_io_channel(bdev); 3155 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 3156 3157 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 3158 io_path2 = STAILQ_FIRST(&nbdev_ch2->io_path_list); 3159 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3160 nvme_qpair2 = io_path2->qpair; 3161 SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL); 3162 3163 /* If a qpair is disconnected, it is freed and then reconnected via 3164 * resetting the corresponding nvme_ctrlr. 3165 */ 3166 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3167 ctrlr->is_failed = true; 3168 3169 poll_thread_times(1, 3); 3170 CU_ASSERT(nvme_qpair1->qpair != NULL); 3171 CU_ASSERT(nvme_qpair2->qpair == NULL); 3172 CU_ASSERT(nvme_ctrlr->resetting == true); 3173 3174 poll_thread_times(0, 3); 3175 CU_ASSERT(nvme_qpair1->qpair == NULL); 3176 CU_ASSERT(nvme_qpair2->qpair == NULL); 3177 CU_ASSERT(ctrlr->is_failed == true); 3178 3179 poll_thread_times(1, 2); 3180 poll_thread_times(0, 1); 3181 CU_ASSERT(ctrlr->is_failed == false); 3182 CU_ASSERT(ctrlr->adminq.is_connected == false); 3183 3184 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3185 poll_thread_times(0, 2); 3186 CU_ASSERT(ctrlr->adminq.is_connected == true); 3187 3188 poll_thread_times(0, 1); 3189 poll_thread_times(1, 1); 3190 CU_ASSERT(nvme_qpair1->qpair != NULL); 3191 CU_ASSERT(nvme_qpair2->qpair != NULL); 3192 CU_ASSERT(nvme_ctrlr->resetting == true); 3193 3194 poll_thread_times(0, 2); 3195 poll_thread_times(1, 1); 3196 poll_thread_times(0, 1); 3197 CU_ASSERT(nvme_ctrlr->resetting == false); 3198 3199 poll_threads(); 3200 3201 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 3202 * fails, the qpair is just freed. 
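 * With fail_reset set, no reconnect happens, so both qpairs remain NULL after
 * the reset sequence finishes.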
3203 */ 3204 nvme_qpair2->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN; 3205 ctrlr->is_failed = true; 3206 ctrlr->fail_reset = true; 3207 3208 poll_thread_times(1, 3); 3209 CU_ASSERT(nvme_qpair1->qpair != NULL); 3210 CU_ASSERT(nvme_qpair2->qpair == NULL); 3211 CU_ASSERT(nvme_ctrlr->resetting == true); 3212 3213 poll_thread_times(0, 3); 3214 poll_thread_times(1, 1); 3215 CU_ASSERT(nvme_qpair1->qpair == NULL); 3216 CU_ASSERT(nvme_qpair2->qpair == NULL); 3217 CU_ASSERT(ctrlr->is_failed == true); 3218 3219 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3220 poll_thread_times(0, 3); 3221 poll_thread_times(1, 1); 3222 poll_thread_times(0, 1); 3223 CU_ASSERT(ctrlr->is_failed == true); 3224 CU_ASSERT(nvme_ctrlr->resetting == false); 3225 CU_ASSERT(nvme_qpair1->qpair == NULL); 3226 CU_ASSERT(nvme_qpair2->qpair == NULL); 3227 3228 poll_threads(); 3229 3230 spdk_put_io_channel(ch2); 3231 3232 set_thread(0); 3233 3234 spdk_put_io_channel(ch1); 3235 3236 poll_threads(); 3237 3238 rc = bdev_nvme_delete("nvme0", &g_any_path); 3239 CU_ASSERT(rc == 0); 3240 3241 poll_threads(); 3242 spdk_delay_us(1000); 3243 poll_threads(); 3244 3245 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 3246 } 3247 3248 static void 3249 test_create_bdev_ctrlr(void) 3250 { 3251 struct nvme_path_id path1 = {}, path2 = {}; 3252 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3253 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3254 const int STRING_SIZE = 32; 3255 const char *attached_names[STRING_SIZE]; 3256 int rc; 3257 3258 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3259 ut_init_trid(&path1.trid); 3260 ut_init_trid2(&path2.trid); 3261 3262 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3263 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3264 3265 g_ut_attach_ctrlr_status = 0; 3266 g_ut_attach_bdev_count = 0; 3267 3268 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3269 attach_ctrlr_done, NULL, NULL, NULL, true); 3270 CU_ASSERT(rc == 0); 3271 3272 spdk_delay_us(1000); 3273 poll_threads(); 3274 3275 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3276 poll_threads(); 3277 3278 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3279 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3280 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3281 3282 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 3283 g_ut_attach_ctrlr_status = -EINVAL; 3284 3285 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3286 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3287 3288 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 3289 3290 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3291 attach_ctrlr_done, NULL, NULL, NULL, true); 3292 CU_ASSERT(rc == 0); 3293 3294 spdk_delay_us(1000); 3295 poll_threads(); 3296 3297 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3298 poll_threads(); 3299 3300 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL); 3301 3302 /* cntlid is not duplicated, and adding the third ctrlr should succeed. 
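 * (This time ctrlr2 keeps the cntlid assigned by ut_attach_ctrlr instead of
 * copying ctrlr1's.)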
*/ 3303 g_ut_attach_ctrlr_status = 0; 3304 3305 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3306 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3307 3308 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3309 attach_ctrlr_done, NULL, NULL, NULL, true); 3310 CU_ASSERT(rc == 0); 3311 3312 spdk_delay_us(1000); 3313 poll_threads(); 3314 3315 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3316 poll_threads(); 3317 3318 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3319 3320 /* Delete two ctrlrs at once. */ 3321 rc = bdev_nvme_delete("nvme0", &g_any_path); 3322 CU_ASSERT(rc == 0); 3323 3324 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3325 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3326 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3327 3328 poll_threads(); 3329 spdk_delay_us(1000); 3330 poll_threads(); 3331 3332 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3333 3334 /* Add two ctrlrs and delete one by one. */ 3335 ctrlr1 = ut_attach_ctrlr(&path1.trid, 0, true, true); 3336 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3337 3338 ctrlr2 = ut_attach_ctrlr(&path2.trid, 0, true, true); 3339 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3340 3341 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3342 attach_ctrlr_done, NULL, NULL, NULL, true); 3343 CU_ASSERT(rc == 0); 3344 3345 spdk_delay_us(1000); 3346 poll_threads(); 3347 3348 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3349 poll_threads(); 3350 3351 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3352 attach_ctrlr_done, NULL, NULL, NULL, true); 3353 CU_ASSERT(rc == 0); 3354 3355 spdk_delay_us(1000); 3356 poll_threads(); 3357 3358 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3359 poll_threads(); 3360 3361 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3362 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3363 3364 rc = bdev_nvme_delete("nvme0", &path1); 3365 CU_ASSERT(rc == 0); 3366 3367 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3368 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) != NULL); 3369 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3370 3371 poll_threads(); 3372 spdk_delay_us(1000); 3373 poll_threads(); 3374 3375 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3376 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3377 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3378 3379 rc = bdev_nvme_delete("nvme0", &path2); 3380 CU_ASSERT(rc == 0); 3381 3382 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3383 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3384 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) != NULL); 3385 3386 poll_threads(); 3387 spdk_delay_us(1000); 3388 poll_threads(); 3389 3390 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3391 } 3392 3393 static struct nvme_ns * 3394 _nvme_bdev_get_ns(struct nvme_bdev *bdev, struct nvme_ctrlr *nvme_ctrlr) 3395 { 3396 struct nvme_ns *nvme_ns; 3397 3398 TAILQ_FOREACH(nvme_ns, &bdev->nvme_ns_list, tailq) { 3399 if (nvme_ns->ctrlr == nvme_ctrlr) { 3400 return nvme_ns; 3401 } 3402 } 3403 3404 return NULL; 3405 } 3406 3407 static void 3408 test_add_multi_ns_to_bdev(void) 3409 { 3410 struct nvme_path_id path1 = {}, path2 = {}; 3411 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 3412 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2; 3413 struct 
nvme_bdev_ctrlr *nbdev_ctrlr; 3414 struct nvme_ns *nvme_ns1, *nvme_ns2; 3415 struct nvme_bdev *bdev1, *bdev2, *bdev3, *bdev4; 3416 const int STRING_SIZE = 32; 3417 const char *attached_names[STRING_SIZE]; 3418 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3419 struct spdk_uuid uuid2 = { .u.raw = { 0x2 } }; 3420 struct spdk_uuid uuid3 = { .u.raw = { 0x3 } }; 3421 struct spdk_uuid uuid4 = { .u.raw = { 0x4 } }; 3422 struct spdk_uuid uuid44 = { .u.raw = { 0x44 } }; 3423 int rc; 3424 3425 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3426 ut_init_trid(&path1.trid); 3427 ut_init_trid2(&path2.trid); 3428 3429 /* Create nvme_bdevs, some of which have shared namespaces between two ctrlrs. */ 3430 3431 /* Attach 1st ctrlr, whose max number of namespaces is 5, and 1st, 3rd, and 4th 3432 * namespaces are populated. 3433 */ 3434 ctrlr1 = ut_attach_ctrlr(&path1.trid, 5, true, true); 3435 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3436 3437 ctrlr1->ns[1].is_active = false; 3438 ctrlr1->ns[4].is_active = false; 3439 ctrlr1->ns[0].uuid = &uuid1; 3440 ctrlr1->ns[2].uuid = &uuid3; 3441 ctrlr1->ns[3].uuid = &uuid4; 3442 3443 g_ut_attach_ctrlr_status = 0; 3444 g_ut_attach_bdev_count = 3; 3445 3446 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3447 attach_ctrlr_done, NULL, NULL, NULL, true); 3448 CU_ASSERT(rc == 0); 3449 3450 spdk_delay_us(1000); 3451 poll_threads(); 3452 3453 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3454 poll_threads(); 3455 3456 /* Attach 2nd ctrlr, whose max number of namespaces is 5, and 1st, 2nd, and 4th 3457 * namespaces are populated. The uuid of 4th namespace is different, and hence 3458 * adding 4th namespace to a bdev should fail. 3459 */ 3460 ctrlr2 = ut_attach_ctrlr(&path2.trid, 5, true, true); 3461 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3462 3463 ctrlr2->ns[2].is_active = false; 3464 ctrlr2->ns[4].is_active = false; 3465 ctrlr2->ns[0].uuid = &uuid1; 3466 ctrlr2->ns[1].uuid = &uuid2; 3467 ctrlr2->ns[3].uuid = &uuid44; 3468 3469 g_ut_attach_ctrlr_status = 0; 3470 g_ut_attach_bdev_count = 2; 3471 3472 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3473 attach_ctrlr_done, NULL, NULL, NULL, true); 3474 CU_ASSERT(rc == 0); 3475 3476 spdk_delay_us(1000); 3477 poll_threads(); 3478 3479 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3480 poll_threads(); 3481 3482 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3483 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3484 3485 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3486 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3487 3488 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 1) != NULL); 3489 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 2) == NULL); 3490 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 3) != NULL); 3491 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 4) != NULL); 3492 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr1, 5) == NULL); 3493 3494 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3495 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3496 3497 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 1) != NULL); 3498 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 2) != NULL); 3499 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 3) == NULL); 3500 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 4) == NULL); 3501 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr2, 5) == NULL); 3502 3503 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3504 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3505 bdev2 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 2); 3506 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 3507 bdev3 = 
nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 3); 3508 SPDK_CU_ASSERT_FATAL(bdev3 != NULL); 3509 bdev4 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 4); 3510 SPDK_CU_ASSERT_FATAL(bdev4 != NULL); 3511 CU_ASSERT(nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 5) == NULL); 3512 3513 CU_ASSERT(bdev1->ref == 2); 3514 CU_ASSERT(bdev2->ref == 1); 3515 CU_ASSERT(bdev3->ref == 1); 3516 CU_ASSERT(bdev4->ref == 1); 3517 3518 /* Test if nvme_bdevs can be deleted by deleting ctrlrs one by one. */ 3519 rc = bdev_nvme_delete("nvme0", &path1); 3520 CU_ASSERT(rc == 0); 3521 3522 poll_threads(); 3523 spdk_delay_us(1000); 3524 poll_threads(); 3525 3526 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nbdev_ctrlr); 3527 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == NULL); 3528 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == nvme_ctrlr2); 3529 3530 rc = bdev_nvme_delete("nvme0", &path2); 3531 CU_ASSERT(rc == 0); 3532 3533 poll_threads(); 3534 spdk_delay_us(1000); 3535 poll_threads(); 3536 3537 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3538 3539 /* Test if an nvme_bdev whose namespace is shared between two ctrlrs 3540 * can be deleted when the bdev subsystem shuts down. 3541 */ 3542 g_ut_attach_bdev_count = 1; 3543 3544 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3545 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3546 3547 ctrlr1->ns[0].uuid = &uuid1; 3548 3549 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, 32, 3550 attach_ctrlr_done, NULL, NULL, NULL, true); 3551 CU_ASSERT(rc == 0); 3552 3553 spdk_delay_us(1000); 3554 poll_threads(); 3555 3556 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3557 poll_threads(); 3558 3559 ut_init_trid2(&path2.trid); 3560 3561 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3562 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3563 3564 ctrlr2->ns[0].uuid = &uuid1; 3565 3566 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, 32, 3567 attach_ctrlr_done, NULL, NULL, NULL, true); 3568 CU_ASSERT(rc == 0); 3569 3570 spdk_delay_us(1000); 3571 poll_threads(); 3572 3573 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3574 poll_threads(); 3575 3576 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3577 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3578 3579 bdev1 = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3580 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 3581 3582 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3583 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3584 3585 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3586 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3587 3588 /* Check that the nvme_bdev has two nvme_ns. */ 3589 nvme_ns1 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr1); 3590 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3591 CU_ASSERT(nvme_ns1->bdev == bdev1); 3592 3593 nvme_ns2 = _nvme_bdev_get_ns(bdev1, nvme_ctrlr2); 3594 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3595 CU_ASSERT(nvme_ns2->bdev == bdev1); 3596 3597 /* Delete the nvme_bdev first, as is done when the bdev subsystem shuts down.
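 * Both nvme_ns references to the bdev must be cleared before the ctrlrs are
 * destructed.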
*/ 3598 bdev_nvme_destruct(&bdev1->disk); 3599 3600 poll_threads(); 3601 3602 CU_ASSERT(nvme_ns1->bdev == NULL); 3603 CU_ASSERT(nvme_ns2->bdev == NULL); 3604 3605 nvme_ctrlr1->destruct = true; 3606 _nvme_ctrlr_destruct(nvme_ctrlr1); 3607 3608 poll_threads(); 3609 spdk_delay_us(1000); 3610 poll_threads(); 3611 3612 nvme_ctrlr2->destruct = true; 3613 _nvme_ctrlr_destruct(nvme_ctrlr2); 3614 3615 poll_threads(); 3616 spdk_delay_us(1000); 3617 poll_threads(); 3618 3619 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 3620 } 3621 3622 static void 3623 test_add_multi_io_paths_to_nbdev_ch(void) 3624 { 3625 struct nvme_path_id path1 = {}, path2 = {}, path3 = {}; 3626 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 3627 struct nvme_bdev_ctrlr *nbdev_ctrlr; 3628 struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2, *nvme_ctrlr3; 3629 struct nvme_ns *nvme_ns1, *nvme_ns2, *nvme_ns3; 3630 const int STRING_SIZE = 32; 3631 const char *attached_names[STRING_SIZE]; 3632 struct nvme_bdev *bdev; 3633 struct spdk_io_channel *ch; 3634 struct nvme_bdev_channel *nbdev_ch; 3635 struct nvme_io_path *io_path1, *io_path2, *io_path3; 3636 struct spdk_uuid uuid1 = { .u.raw = { 0x1 } }; 3637 int rc; 3638 3639 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 3640 ut_init_trid(&path1.trid); 3641 ut_init_trid2(&path2.trid); 3642 ut_init_trid3(&path3.trid); 3643 g_ut_attach_ctrlr_status = 0; 3644 g_ut_attach_bdev_count = 1; 3645 3646 set_thread(1); 3647 3648 ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true); 3649 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 3650 3651 ctrlr1->ns[0].uuid = &uuid1; 3652 3653 rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE, 3654 attach_ctrlr_done, NULL, NULL, NULL, true); 3655 CU_ASSERT(rc == 0); 3656 3657 spdk_delay_us(1000); 3658 poll_threads(); 3659 3660 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3661 poll_threads(); 3662 3663 ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true); 3664 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 3665 3666 ctrlr2->ns[0].uuid = &uuid1; 3667 3668 rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE, 3669 attach_ctrlr_done, NULL, NULL, NULL, true); 3670 CU_ASSERT(rc == 0); 3671 3672 spdk_delay_us(1000); 3673 poll_threads(); 3674 3675 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 3676 poll_threads(); 3677 3678 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 3679 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 3680 3681 nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid); 3682 SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL); 3683 3684 nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid); 3685 SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL); 3686 3687 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 3688 SPDK_CU_ASSERT_FATAL(bdev != NULL); 3689 3690 nvme_ns1 = _nvme_bdev_get_ns(bdev, nvme_ctrlr1); 3691 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 3692 3693 nvme_ns2 = _nvme_bdev_get_ns(bdev, nvme_ctrlr2); 3694 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 3695 3696 set_thread(0); 3697 3698 ch = spdk_get_io_channel(bdev); 3699 SPDK_CU_ASSERT_FATAL(ch != NULL); 3700 nbdev_ch = spdk_io_channel_get_ctx(ch); 3701 3702 io_path1 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns1); 3703 SPDK_CU_ASSERT_FATAL(io_path1 != NULL); 3704 3705 io_path2 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns2); 3706 SPDK_CU_ASSERT_FATAL(io_path2 != NULL); 3707 3708 set_thread(1); 3709 3710 /* Check if I/O path is dynamically added to nvme_bdev_channel. 
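 * The channel was created before ctrlr3 is attached, so the new io_path must
 * appear in the existing nbdev_ch.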
	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	ctrlr3->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr3 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr3 != NULL);

	nvme_ns3 = _nvme_bdev_get_ns(bdev, nvme_ctrlr3);
	SPDK_CU_ASSERT_FATAL(nvme_ns3 != NULL);

	io_path3 = _bdev_nvme_get_io_path(nbdev_ch, nvme_ns3);
	SPDK_CU_ASSERT_FATAL(io_path3 != NULL);

	/* Check if I/O path is dynamically deleted from nvme_bdev_channel. */
	rc = bdev_nvme_delete("nvme0", &path2);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid) == nvme_ctrlr1);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid) == NULL);
	CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path3.trid) == nvme_ctrlr3);

	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns1) == io_path1);
	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns2) == NULL);
	CU_ASSERT(_bdev_nvme_get_io_path(nbdev_ch, nvme_ns3) == io_path3);

	set_thread(0);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_admin_path(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct spdk_bdev_io *bdev_io;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
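	/* UT commentary: getting an I/O channel on the nvme_bdev creates an
	 * nvme_bdev_channel with one io_path per attached ctrlr; the admin
	 * passthrough below is expected to be routed to a non-failed ctrlr.
	 */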
	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, ch);
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	/* ctrlr1 is failed but ctrlr2 is not. The admin command is
	 * submitted to ctrlr2.
	 */
	ctrlr1->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Both ctrlr1 and ctrlr2 are failed. Submitting the admin command fails. */
	ctrlr2->is_failed = true;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(ctrlr1->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(ctrlr2->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_io_path *
ut_get_io_path_by_ctrlr(struct nvme_bdev_channel *nbdev_ch,
			struct nvme_ctrlr *nvme_ctrlr)
{
	struct nvme_io_path *io_path;

	STAILQ_FOREACH(io_path, &nbdev_ch->io_path_list, stailq) {
		if (io_path->qpair->ctrlr == nvme_ctrlr) {
			return io_path;
		}
	}

	return NULL;
}

static void
test_reset_bdev_ctrlr(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	struct nvme_path_id *curr_path1, *curr_path2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct nvme_bdev_io *first_bio;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2;
	struct nvme_io_path *io_path11, *io_path12, *io_path21, *io_path22;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	curr_path1 = TAILQ_FIRST(&nvme_ctrlr1->trids);
	SPDK_CU_ASSERT_FATAL(curr_path1 != NULL);

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr2 != NULL);

	curr_path2 = TAILQ_FIRST(&nvme_ctrlr2->trids);
	SPDK_CU_ASSERT_FATAL(curr_path2 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch1 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	nbdev_ch1 = spdk_io_channel_get_ctx(ch1);
	io_path11 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path11 != NULL);
	io_path12 = ut_get_io_path_by_ctrlr(nbdev_ch1, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path12 != NULL);

	first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1);
	first_bio = (struct nvme_bdev_io *)first_bdev_io->driver_ctx;

	set_thread(1);

	ch2 = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	nbdev_ch2 = spdk_io_channel_get_ctx(ch2);
	io_path21 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path21 != NULL);
	io_path22 = ut_get_io_path_by_ctrlr(nbdev_ch2, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path22 != NULL);

	second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2);

	/* The first reset request from bdev_io is submitted on thread 0.
	 * Check if ctrlr1 is reset and then ctrlr2 is reset.
	 *
	 * A few extra polls are necessary after resetting ctrlr1 to check
	 * pending reset requests for ctrlr1.
	 */
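	/* UT commentary: the poll_thread_times() counts below step the reset
	 * state machine one transition at a time, as the assertions trace: I/O
	 * qpairs are deleted on each thread, the admin qpair is disconnected and
	 * reconnected, and the I/O qpairs are recreated before the reset moves on
	 * to the next ctrlr.
	 */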
	ctrlr1->is_failed = true;
	curr_path1->is_failed = true;
	ctrlr2->is_failed = true;
	curr_path2->is_failed = true;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);
	CU_ASSERT(first_bio->io_path == io_path11);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);

	poll_thread_times(0, 3);
	CU_ASSERT(io_path11->qpair->qpair == NULL);
	CU_ASSERT(io_path21->qpair->qpair != NULL);

	poll_thread_times(1, 2);
	CU_ASSERT(io_path11->qpair->qpair == NULL);
	CU_ASSERT(io_path21->qpair->qpair == NULL);
	CU_ASSERT(ctrlr1->is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(ctrlr1->adminq.is_connected == false);
	CU_ASSERT(curr_path1->is_failed == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr1->adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(io_path11->qpair->qpair != NULL);
	CU_ASSERT(io_path21->qpair->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(io_path11->qpair->qpair != NULL);
	CU_ASSERT(io_path21->qpair->qpair != NULL);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr1->resetting == true);
	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr1->resetting == false);
	CU_ASSERT(curr_path1->is_failed == false);
	CU_ASSERT(first_bio->io_path == io_path12);
	CU_ASSERT(nvme_ctrlr2->resetting == true);

	poll_thread_times(0, 3);
	CU_ASSERT(io_path12->qpair->qpair == NULL);
	CU_ASSERT(io_path22->qpair->qpair != NULL);

	poll_thread_times(1, 2);
	CU_ASSERT(io_path12->qpair->qpair == NULL);
	CU_ASSERT(io_path22->qpair->qpair == NULL);
	CU_ASSERT(ctrlr2->is_failed == true);

	poll_thread_times(0, 1);
	CU_ASSERT(nvme_ctrlr2->resetting == true);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(ctrlr2->adminq.is_connected == false);
	CU_ASSERT(curr_path2->is_failed == true);

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_thread_times(0, 2);
	CU_ASSERT(ctrlr2->adminq.is_connected == true);

	poll_thread_times(0, 1);
	CU_ASSERT(io_path12->qpair->qpair != NULL);
	CU_ASSERT(io_path22->qpair->qpair == NULL);

	poll_thread_times(1, 2);
	CU_ASSERT(io_path12->qpair->qpair != NULL);
	CU_ASSERT(io_path22->qpair->qpair != NULL);

	poll_thread_times(0, 2);
	CU_ASSERT(nvme_ctrlr2->resetting == true);
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_ctrlr2->resetting == true);
	poll_thread_times(0, 2);
	CU_ASSERT(first_bio->io_path == NULL);
	CU_ASSERT(nvme_ctrlr2->resetting == false);
	CU_ASSERT(curr_path2->is_failed == false);

	poll_threads();

	/* There is a race between two reset requests from bdev_io.
	 *
	 * The first reset request is submitted on thread 0, and the second reset
	 * request is submitted on thread 1 while the first is resetting ctrlr1.
	 * The second request is pending on ctrlr1. After the first completes
	 * resetting ctrlr1, both reset requests go to ctrlr2. The first arrives
	 * earlier than the second, so the second is pending on ctrlr2 again.
	 * After the first completes resetting ctrlr2, both complete successfully.
	 */
	ctrlr1->is_failed = true;
	curr_path1->is_failed = true;
	ctrlr2->is_failed = true;
	curr_path2->is_failed = true;
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(0);

	bdev_nvme_submit_request(ch1, first_bdev_io);

	set_thread(1);

	bdev_nvme_submit_request(ch2, second_bdev_io);

	CU_ASSERT(nvme_ctrlr1->resetting == true);
	CU_ASSERT(nvme_ctrlr1->reset_cb_arg == first_bio);
	CU_ASSERT(TAILQ_FIRST(&io_path21->qpair->ctrlr_ch->pending_resets) == second_bdev_io);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->is_failed == false);
	CU_ASSERT(curr_path1->is_failed == false);
	CU_ASSERT(ctrlr2->is_failed == false);
	CU_ASSERT(curr_path2->is_failed == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

static void
test_find_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, }, nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {}, ctrlr_ch2 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, };
	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);

	/* Test if io_path whose ANA state is not accessible is excluded. */

	nvme_qpair1.qpair = &qpair1;
	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	nvme_ns1.ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	nvme_ns1.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = NULL;

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = NULL;

	/* Test if io_path whose qpair is resetting is excluded. */
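	/* UT commentary: a NULL nvme_qpair->qpair means the qpair was freed for
	 * reset and has not been reconnected yet, so the io_path cannot carry I/O.
	 */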
	nvme_qpair1.qpair = NULL;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == NULL);

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);

	/* Test if ANA optimized state or the first found ANA non-optimized state
	 * is prioritized.
	 */

	nvme_qpair1.qpair = &qpair1;
	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_qpair2.qpair = &qpair2;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nbdev_ch.current_io_path = NULL;

	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = NULL;
}

static void
test_retry_io_if_ana_state_is_updating(void)
{
	struct nvme_path_id path = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io1;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io1);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, the I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the ANA state of the namespace is inaccessible, the I/O should be
	 * queued.
	 */
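	/* UT commentary: queued I/Os are parked on nbdev_ch->retry_io_list and
	 * re-driven by the retry poller rather than being failed back to the
	 * caller.
	 */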
	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nbdev_ch->current_io_path = NULL;

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	/* ANA state became accessible while the I/O was queued. */
	nvme_ns->ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	spdk_delay_us(1000000);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_io_path_error(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1, *nvme_ctrlr2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns1, *nvme_ns2;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path1, *io_path2;
	struct nvme_qpair *nvme_qpair1, *nvme_qpair2;
	struct ut_nvme_req *req;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	CU_ASSERT(nvme_ctrlr1 != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns1 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr1);
	CU_ASSERT(nvme_ns1 != NULL);
	CU_ASSERT(nvme_ns1 == _nvme_bdev_get_ns(bdev, nvme_ctrlr1));

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);
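	/* UT commentary: g_opts.bdev_retry_count was set to 1 above, so each
	 * path error may be retried once before the I/O completes with an error.
	 */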
	io_path1 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr1);
	SPDK_CU_ASSERT_FATAL(io_path1 != NULL);

	nvme_qpair1 = io_path1->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair1 != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair1->qpair != NULL);

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* The I/O got a temporary I/O path error, but it should not be retried
	 * if DNR is set.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;
	req->cpl.status.dnr = 1;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);

	/* The I/O got a temporary I/O path error, but it should succeed after retry. */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* Add io_path2 dynamically, and create a multipath configuration. */
	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr2 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path2.trid);
	CU_ASSERT(nvme_ctrlr2 != NULL);

	nvme_ns2 = nvme_ctrlr_get_first_active_ns(nvme_ctrlr2);
	CU_ASSERT(nvme_ns2 != NULL);
	CU_ASSERT(nvme_ns2 == _nvme_bdev_get_ns(bdev, nvme_ctrlr2));

	io_path2 = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr2);
	SPDK_CU_ASSERT_FATAL(io_path2 != NULL);

	nvme_qpair2 = io_path2->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair2 != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair2->qpair != NULL);

	/* The I/O is submitted to io_path1, but the qpair of io_path1 is
	 * disconnected and deleted, so the I/O is aborted. io_path2 is
	 * available, and hence after a retry the I/O is submitted to io_path2
	 * and should succeed.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair1->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_SQ_DELETION;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair1->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	spdk_nvme_ctrlr_free_io_qpair(nvme_qpair1->qpair);
	nvme_qpair1->qpair = NULL;

	poll_threads();

	CU_ASSERT(nvme_qpair2->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_retry_io_count(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	struct ut_nvme_req *req;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);
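	/* UT commentary: the cases below exercise g_opts.bdev_retry_count — an
	 * I/O aborted by request is never retried, an I/O whose bio->retry_count
	 * has reached the limit fails with an NVMe error, and a limit of -1 means
	 * retry indefinitely.
	 */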
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If an I/O is aborted by request, it should not be retried. */
	g_opts.bdev_retry_count = 1;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);

	/* If bio->retry_count is not less than g_opts.bdev_retry_count,
	 * the failed I/O should not be retried.
	 */
	g_opts.bdev_retry_count = 4;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	bio->retry_count = 4;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_NVME_ERROR);

	/* If g_opts.bdev_retry_count is -1, a failed I/O should always be retried. */
	g_opts.bdev_retry_count = -1;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	bio->retry_count = 4;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If bio->retry_count is less than g_opts.bdev_retry_count,
	 * the failed I/O should be retried.
	 */
	g_opts.bdev_retry_count = 4;

	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	req->cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	bio->retry_count = 3;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_concurrent_read_ana_log_page(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A following read request should be rejected. */
	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	set_thread(1);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* A reset request while reading the ANA log page should not be rejected. */
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Reading the ANA log page while resetting the ctrlr should be rejected. */
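	/* UT commentary: while the ctrlr is resetting, nvme_ctrlr_read_ana_log_page()
	 * is expected to be a no-op, so ana_log_page_updating stays false.
	 */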
	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	nvme_ctrlr_read_ana_log_page(nvme_ctrlr);

	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_retry_io_for_ana_error(void)
{
	struct nvme_path_id path = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct nvme_bdev_io *bio;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	struct ut_nvme_req *req;
	uint64_t now;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	g_opts.bdev_retry_count = 1;

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, true, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io);

	bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	now = spdk_get_ticks();

	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If an I/O gets an ANA error, it should be queued, the corresponding
	 * namespace should be frozen, and its ANA state should be updated.
	 */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);

	req = ut_get_outstanding_nvme_request(nvme_qpair->qpair, bio);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	nvme_ns->ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	req->cpl.status.sc = SPDK_NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE;
	req->cpl.status.sct = SPDK_NVME_SCT_PATH;

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* The I/O should be retried immediately. */
	CU_ASSERT(bio->retry_ticks == now);
	CU_ASSERT(nvme_ns->ana_state_updating == true);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == true);

	poll_threads();

	/* The namespace is inaccessible, and hence the I/O should be queued again. */
	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	/* The I/O should be retried after a second if no I/O path was found but
	 * one may become available.
	 */
	CU_ASSERT(bio->retry_ticks == now + spdk_get_ticks_hz());

	/* The namespace should be unfrozen after its ANA state update completes. */
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ns->ana_state_updating == false);
	CU_ASSERT(nvme_ns->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);
	CU_ASSERT(nvme_ctrlr->ana_log_page_updating == false);

	/* Retrying the queued I/O should succeed. */
	spdk_delay_us(spdk_get_ticks_hz() - g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.bdev_retry_count = 0;
}

static void
test_check_io_error_resiliency_params(void)
{
	/* 1st parameter is ctrlr_loss_timeout_sec, 2nd parameter is reconnect_delay_sec, and
	 * 3rd parameter is fast_io_fail_timeout_sec.
	 */
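	/* UT commentary: the expectations below encode the validity rules —
	 * reconnect_delay_sec must be nonzero iff ctrlr_loss_timeout_sec is
	 * nonzero and must not exceed it, and fast_io_fail_timeout_sec, when
	 * nonzero, must lie between reconnect_delay_sec and ctrlr_loss_timeout_sec
	 * (a ctrlr_loss_timeout_sec of -1 means an infinite upper bound).
	 */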
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-2, 1, 0) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 0, 0) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 0, 0) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(1, 2, 0) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 1, 0) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 0) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 2, 0) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 0) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, 0) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, 0) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(0, 0, 1) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 2, 1) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 4) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(3, 2, 1) == false);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, 1, 1) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 2) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(2, 1, 1) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(INT32_MAX, INT32_MAX, INT32_MAX) == true);
	CU_ASSERT(bdev_nvme_check_io_error_resiliency_params(-1, UINT32_MAX, UINT32_MAX) == true);
}

static void
test_retry_io_if_ctrlr_is_resetting(void)
{
	struct nvme_path_id path = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io1, *bdev_io2;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_qpair *nvme_qpair;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path.trid);

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, &opts, false);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid);
	CU_ASSERT(nvme_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	CU_ASSERT(bdev != NULL);

	nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr);
	CU_ASSERT(nvme_ns != NULL);

	bdev_io1 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io1);

	bdev_io2 = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL);
	ut_bdev_io_set_buf(bdev_io2);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	nvme_qpair = io_path->qpair;
	SPDK_CU_ASSERT_FATAL(nvme_qpair != NULL);
	SPDK_CU_ASSERT_FATAL(nvme_qpair->qpair != NULL);

	bdev_io1->internal.ch = (struct spdk_bdev_channel *)ch;
	bdev_io2->internal.ch = (struct spdk_bdev_channel *)ch;

	/* If the qpair is connected, the I/O should succeed. */
	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);

	poll_threads();
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* If the qpair is disconnected, it is freed and then reconnected via
	 * resetting the corresponding nvme_ctrlr. An I/O should be queued if
	 * it is submitted while the nvme_ctrlr is resetting.
	 */
	nvme_qpair->qpair->failure_reason = SPDK_NVME_QPAIR_FAILURE_UNKNOWN;
	ctrlr->is_failed = true;

	poll_thread_times(0, 5);

	CU_ASSERT(nvme_qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr->is_failed == false);

	bdev_io1->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io1);

	spdk_delay_us(1);

	bdev_io2->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io2);

	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io1 == TAILQ_FIRST(&nbdev_ch->retry_io_list));
	CU_ASSERT(bdev_io2 == TAILQ_NEXT(bdev_io1, module_link));

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->resetting == false);

	spdk_delay_us(999999 - g_opts.nvme_adminq_poll_period_us);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io1->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io1->internal.in_submit_request == false);
	CU_ASSERT(bdev_io1->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(bdev_io2 == TAILQ_FIRST(&nbdev_ch->retry_io_list));

	spdk_delay_us(1);

	poll_thread_times(0, 1);

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 1);
	CU_ASSERT(bdev_io2->internal.in_submit_request == true);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	poll_threads();

	CU_ASSERT(nvme_qpair->qpair->num_outstanding_reqs == 0);
	CU_ASSERT(bdev_io2->internal.in_submit_request == false);
	CU_ASSERT(bdev_io2->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	free(bdev_io1);
	free(bdev_io2);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_reconnect_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = 2;
	nvme_ctrlr->opts.reconnect_delay_sec = 1;

	ch1 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	ctrlr_ch1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(ctrlr_ch1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctrlr_ch2 = spdk_io_channel_get_ctx(ch2);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* The reset should fail and a reconnect timer should be registered. */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	/* Then a reconnect retry should succeed. */
	ctrlr.fail_reset = false;

	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair != NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	/* The reset should fail and a reconnect timer should be registered. */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(ctrlr.is_failed == true);

	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);

	/* Then a reconnect retry should still fail. */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch1->qpair->qpair == NULL);
	CU_ASSERT(ctrlr_ch2->qpair->qpair == NULL);
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);

	/* Then a reconnect retry should still fail and the ctrlr should be deleted. */
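	/* UT commentary: ctrlr_loss_timeout_sec was set to 2 with a 1-second
	 * reconnect delay, so after this next failed reconnect attempt the loss
	 * timeout is regarded as expired and the ctrlr enters destruction instead
	 * of scheduling another retry.
	 */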
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
	CU_ASSERT(nvme_ctrlr->destruct == true);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static struct nvme_path_id *
ut_get_path_id_by_trid(struct nvme_ctrlr *nvme_ctrlr,
		       const struct spdk_nvme_transport_id *trid)
{
	struct nvme_path_id *p;

	TAILQ_FOREACH(p, &nvme_ctrlr->trids, link) {
		if (spdk_nvme_transport_id_compare(&p->trid, trid) == 0) {
			break;
		}
	}

	return p;
}

static void
test_retry_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_ctrlr *nvme_ctrlr = NULL;
	struct nvme_path_id *path_id1, *path_id2, *path_id3;
	struct spdk_io_channel *ch;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, NULL);
	CU_ASSERT(rc == 0);

	nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL);

	nvme_ctrlr->opts.ctrlr_loss_timeout_sec = -1;
	nvme_ctrlr->opts.reconnect_delay_sec = 1;

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2);
	CU_ASSERT(rc == 0);

	rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid3);
	CU_ASSERT(rc == 0);

	ch = spdk_get_io_channel(nvme_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ctrlr_ch = spdk_io_channel_get_ctx(ch);

	path_id1 = ut_get_path_id_by_trid(nvme_ctrlr, &trid1);
	SPDK_CU_ASSERT_FATAL(path_id1 != NULL);
	CU_ASSERT(path_id1->is_failed == false);
	CU_ASSERT(path_id1 == nvme_ctrlr->active_path_id);

	/* If the reset fails and a reconnect is scheduled, the path_id is
	 * switched from trid1 to trid2.
	 */
	ctrlr.fail_reset = true;
	ctrlr.is_failed = true;

	rc = bdev_nvme_reset(nvme_ctrlr);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr.is_failed == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == true);
	CU_ASSERT(path_id1->is_failed == true);

	path_id2 = ut_get_path_id_by_trid(nvme_ctrlr, &trid2);
	SPDK_CU_ASSERT_FATAL(path_id2 != NULL);
	CU_ASSERT(path_id2->is_failed == false);
	CU_ASSERT(path_id2 == nvme_ctrlr->active_path_id);

	/* If we remove trid2 while a reconnect is scheduled, trid2 is removed
	 * and the path_id is switched to trid3, but a reset is not started.
	 */
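	/* UT commentary: the second argument of bdev_nvme_failover() requests
	 * removal of the current active path, so trid2 is dropped entirely and
	 * trid3 becomes the active path_id.
	 */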
	rc = bdev_nvme_failover(nvme_ctrlr, true);
	CU_ASSERT(rc == 0);

	CU_ASSERT(ut_get_path_id_by_trid(nvme_ctrlr, &trid2) == NULL);

	path_id3 = ut_get_path_id_by_trid(nvme_ctrlr, &trid3);
	SPDK_CU_ASSERT_FATAL(path_id3 != NULL);
	CU_ASSERT(path_id3->is_failed == false);
	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);

	CU_ASSERT(nvme_ctrlr->resetting == false);

	/* If the reconnect succeeds, trid3 should be the active path_id. */
	ctrlr.fail_reset = false;

	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_thread_times(0, 1);

	CU_ASSERT(nvme_ctrlr->resetting == true);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer == NULL);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(path_id3->is_failed == false);
	CU_ASSERT(path_id3 == nvme_ctrlr->active_path_id);
	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair != NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_is_delayed == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_fail_path(void)
{
	struct nvme_path_id path = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct nvme_ns *nvme_ns;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct nvme_ctrlr_channel *ctrlr_ch;
	int rc;

	/* The test scenario is the following.
	 * - We set fast_io_fail_timeout_sec to be smaller than ctrlr_loss_timeout_sec.
	 * - Resetting the ctrlr fails, and reconnecting the ctrlr is repeated.
	 * - While reconnecting the ctrlr, an I/O is submitted and queued.
	 * - The I/O waits until the ctrlr is recovered, but fast_io_fail_timeout_sec
	 *   expires first. The queued I/O is failed.
	 * - After fast_io_fail_timeout_sec, any I/O is failed immediately.
	 * - Then ctrlr_loss_timeout_sec elapses and the ctrlr is deleted.
	 */
5461 */ 5462 5463 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 5464 ut_init_trid(&path.trid); 5465 5466 set_thread(0); 5467 5468 ctrlr = ut_attach_ctrlr(&path.trid, 1, false, false); 5469 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 5470 5471 g_ut_attach_ctrlr_status = 0; 5472 g_ut_attach_bdev_count = 1; 5473 5474 opts.ctrlr_loss_timeout_sec = 4; 5475 opts.reconnect_delay_sec = 1; 5476 opts.fast_io_fail_timeout_sec = 2; 5477 5478 rc = bdev_nvme_create(&path.trid, "nvme0", attached_names, STRING_SIZE, 5479 attach_ctrlr_done, NULL, NULL, &opts, false); 5480 CU_ASSERT(rc == 0); 5481 5482 spdk_delay_us(1000); 5483 poll_threads(); 5484 5485 nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 5486 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 5487 5488 nvme_ctrlr = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path.trid); 5489 CU_ASSERT(nvme_ctrlr != NULL); 5490 5491 bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1); 5492 CU_ASSERT(bdev != NULL); 5493 5494 nvme_ns = nvme_ctrlr_get_first_active_ns(nvme_ctrlr); 5495 CU_ASSERT(nvme_ns != NULL); 5496 5497 ch = spdk_get_io_channel(bdev); 5498 SPDK_CU_ASSERT_FATAL(ch != NULL); 5499 5500 nbdev_ch = spdk_io_channel_get_ctx(ch); 5501 5502 io_path = ut_get_io_path_by_ctrlr(nbdev_ch, nvme_ctrlr); 5503 SPDK_CU_ASSERT_FATAL(io_path != NULL); 5504 5505 ctrlr_ch = io_path->qpair->ctrlr_ch; 5506 SPDK_CU_ASSERT_FATAL(ctrlr_ch != NULL); 5507 SPDK_CU_ASSERT_FATAL(ctrlr_ch->qpair->qpair != NULL); 5508 5509 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, ch); 5510 ut_bdev_io_set_buf(bdev_io); 5511 5512 5513 /* Resetting a ctrlr should fail and a reconnect timer should be registered. */ 5514 ctrlr->fail_reset = true; 5515 ctrlr->is_failed = true; 5516 5517 rc = bdev_nvme_reset(nvme_ctrlr); 5518 CU_ASSERT(rc == 0); 5519 CU_ASSERT(nvme_ctrlr->resetting == true); 5520 CU_ASSERT(ctrlr->is_failed == true); 5521 5522 poll_threads(); 5523 5524 CU_ASSERT(nvme_ctrlr->resetting == false); 5525 CU_ASSERT(ctrlr->is_failed == false); 5526 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5527 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5528 CU_ASSERT(nvme_ctrlr->reset_start_tsc != 0); 5529 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5530 5531 /* I/O should be queued. */ 5532 bdev_io->internal.in_submit_request = true; 5533 5534 bdev_nvme_submit_request(ch, bdev_io); 5535 5536 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5537 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5538 5539 /* After a second, the I/O should be still queued and the ctrlr should be 5540 * still recovering. 5541 */ 5542 spdk_delay_us(SPDK_SEC_TO_USEC); 5543 poll_threads(); 5544 5545 CU_ASSERT(bdev_io->internal.in_submit_request == true); 5546 CU_ASSERT(bdev_io == TAILQ_FIRST(&nbdev_ch->retry_io_list)); 5547 5548 CU_ASSERT(nvme_ctrlr->resetting == false); 5549 CU_ASSERT(ctrlr->is_failed == false); 5550 CU_ASSERT(ctrlr_ch->qpair->qpair == NULL); 5551 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5552 CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false); 5553 CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == false); 5554 5555 CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL); 5556 5557 /* After two seconds, ctrlr_fail_timeout_sec should expire. 
	 */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(nvme_ctrlr->resetting == false);
	CU_ASSERT(ctrlr->is_failed == false);
	CU_ASSERT(ctrlr_ch->qpair->qpair == NULL);
	CU_ASSERT(nvme_ctrlr->reconnect_delay_timer != NULL);
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == false);
	CU_ASSERT(nvme_ctrlr->fast_io_fail_timedout == true);

	/* Then within a second, pending I/O should be failed. */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(TAILQ_EMPTY(&nbdev_ch->retry_io_list));

	/* Another I/O submission should be failed immediately. */
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	/* After four seconds, ctrlr_loss_timeout_sec should expire and the ctrlr
	 * should be deleted.
	 */
	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(nvme_ctrlr == nvme_ctrlr_get_by_name("nvme0"));
	CU_ASSERT(bdev_nvme_check_ctrlr_loss_timeout(nvme_ctrlr) == true);
	CU_ASSERT(nvme_ctrlr->destruct == true);

	spdk_put_io_channel(ch);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	free(bdev_io);
}

static void
test_nvme_ns_cmp(void)
{
	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {};

	nvme_ns1.id = 0;
	nvme_ns2.id = UINT32_MAX;

	CU_ASSERT(nvme_ns_cmp(&nvme_ns1, &nvme_ns2) < 0);
	CU_ASSERT(nvme_ns_cmp(&nvme_ns2, &nvme_ns1) > 0);
}

static void
test_ana_transition(void)
{
	struct spdk_nvme_ctrlr ctrlr = { .cdata.anatt = 10, };
	struct nvme_ctrlr nvme_ctrlr = { .ctrlr = &ctrlr, };
	struct nvme_ns nvme_ns = { .ctrlr = &nvme_ctrlr, };
	struct spdk_nvme_ana_group_descriptor desc = { .ana_group_id = 1, };

	/* case 1: ANA transition timedout is canceled. */
	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
	nvme_ns.ana_transition_timedout = true;

	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.ana_transition_timedout == false);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);

	/* case 2: ANATT timer is kept. */
	nvme_ns.ana_state = SPDK_NVME_ANA_CHANGE_STATE;
	nvme_ns.anatt_timer = SPDK_POLLER_REGISTER(nvme_ns_ana_transition_timedout,
			      &nvme_ns,
			      ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);

	desc.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.anatt_timer != NULL);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE);

	/* case 3: ANATT timer is stopped. */
	desc.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.anatt_timer == NULL);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE);

	/* case 4: ANATT timer is started.
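	 * Entering ANA change state with no timer running should arm a fresh ANATT
	 * timer based on cdata.anatt (10 seconds in this test).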
	 */
	desc.ana_state = SPDK_NVME_ANA_CHANGE_STATE;

	_nvme_ns_set_ana_state(&nvme_ns, &desc);

	CU_ASSERT(nvme_ns.anatt_timer != NULL);
	CU_ASSERT(nvme_ns.ana_state == SPDK_NVME_ANA_CHANGE_STATE);

	/* case 5: ANATT timer is expired. */
	spdk_delay_us(ctrlr.cdata.anatt * SPDK_SEC_TO_USEC);

	poll_threads();

	CU_ASSERT(nvme_ns.anatt_timer == NULL);
	CU_ASSERT(nvme_ns.ana_transition_timedout == true);
}

static void
_set_preferred_path_cb(void *cb_arg, int rc)
{
	bool *done = cb_arg;

	*done = true;
}

static void
test_set_preferred_path(void)
{
	struct nvme_path_id path1 = {}, path2 = {}, path3 = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	const struct spdk_nvme_ctrlr_data *cdata;
	bool done;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	ut_init_trid3(&path3.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr3 = ut_attach_ctrlr(&path3.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL);

	ctrlr3->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path3.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, NULL, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred. */

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);

	/* If io_path to ctrlr2 is set to the preferred path dynamically, find_io_path()
	 * should return io_path to ctrlr2.
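	 * The preferred path is identified by the cntlid of ctrlr2, and the change
	 * takes effect on the already-acquired I/O channel without reacquiring it.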
	 */

	cdata = spdk_nvme_ctrlr_get_data(ctrlr2);
	done = false;

	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);

	poll_threads();
	CU_ASSERT(done == true);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);

	/* If io_path to ctrlr3 is set to the preferred path and then a new I/O channel is
	 * acquired, find_io_path() should return io_path to ctrlr3.
	 */

	spdk_put_io_channel(ch);

	poll_threads();

	cdata = spdk_nvme_ctrlr_get_data(ctrlr3);
	done = false;

	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);

	poll_threads();
	CU_ASSERT(done == true);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr3);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_find_next_io_path(void)
{
	struct nvme_bdev_channel nbdev_ch = {
		.io_path_list = STAILQ_HEAD_INITIALIZER(nbdev_ch.io_path_list),
		.mp_policy = BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
	};
	struct spdk_nvme_qpair qpair1 = {}, qpair2 = {}, qpair3 = {};
	struct spdk_nvme_ctrlr ctrlr1 = {}, ctrlr2 = {}, ctrlr3 = {};
	struct nvme_ctrlr nvme_ctrlr1 = { .ctrlr = &ctrlr1, };
	struct nvme_ctrlr nvme_ctrlr2 = { .ctrlr = &ctrlr2, };
	struct nvme_ctrlr nvme_ctrlr3 = { .ctrlr = &ctrlr3, };
	struct nvme_ctrlr_channel ctrlr_ch1 = {};
	struct nvme_ctrlr_channel ctrlr_ch2 = {};
	struct nvme_ctrlr_channel ctrlr_ch3 = {};
	struct nvme_qpair nvme_qpair1 = { .ctrlr_ch = &ctrlr_ch1, .ctrlr = &nvme_ctrlr1, .qpair = &qpair1, };
	struct nvme_qpair nvme_qpair2 = { .ctrlr_ch = &ctrlr_ch2, .ctrlr = &nvme_ctrlr2, .qpair = &qpair2, };
	struct nvme_qpair nvme_qpair3 = { .ctrlr_ch = &ctrlr_ch3, .ctrlr = &nvme_ctrlr3, .qpair = &qpair3, };
	struct nvme_ns nvme_ns1 = {}, nvme_ns2 = {}, nvme_ns3 = {};
	struct nvme_io_path io_path1 = { .qpair = &nvme_qpair1, .nvme_ns = &nvme_ns1, };
	struct nvme_io_path io_path2 = { .qpair = &nvme_qpair2, .nvme_ns = &nvme_ns2, };
	struct nvme_io_path io_path3 = { .qpair = &nvme_qpair3, .nvme_ns = &nvme_ns3, };

	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path1, stailq);
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path2, stailq);
	STAILQ_INSERT_TAIL(&nbdev_ch.io_path_list, &io_path3, stailq);

	/* nbdev_ch->current_io_path is always filled when bdev_nvme_find_next_io_path() is called.
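	 * Hence the test seeds current_io_path explicitly before each lookup below.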
	 */

	nbdev_ch.current_io_path = &io_path2;
	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns1.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);

	nvme_ns1.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path1);

	nbdev_ch.current_io_path = &io_path3;
	nvme_ns1.ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE;
	nvme_ns2.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	nvme_ns3.ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE;
	CU_ASSERT(bdev_nvme_find_io_path(&nbdev_ch) == &io_path2);
}

static void
test_disable_auto_failback(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	struct nvme_ctrlr *nvme_ctrlr1;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct nvme_io_path *io_path;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	const struct spdk_nvme_ctrlr_data *cdata;
	bool done;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	g_opts.disable_auto_failback = true;

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, &opts, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, &opts, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	nvme_ctrlr1 = nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &path1.trid);
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr1 != NULL);

	/* ctrlr1 was added first. Hence io_path to ctrlr1 should be preferred.
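	 * With g_opts.disable_auto_failback set above, this preference should hold
	 * only until the path fails; recovery alone must not switch I/O back.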
	 */

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);

	/* If resetting ctrlr1 failed, io_path to ctrlr2 should be used. */
	ctrlr1->fail_reset = true;
	ctrlr1->is_failed = true;

	bdev_nvme_reset(nvme_ctrlr1);

	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	CU_ASSERT(ctrlr1->adminq.is_connected == false);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);

	/* After a second, ctrlr1 is recovered. However, automatic failback is disabled.
	 * Hence, io_path to ctrlr2 should still be used.
	 */
	ctrlr1->fail_reset = false;

	spdk_delay_us(SPDK_SEC_TO_USEC);
	poll_threads();

	CU_ASSERT(ctrlr1->adminq.is_connected == true);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr2);

	/* Set io_path to ctrlr1 to preferred explicitly. Then io_path to ctrlr1 should
	 * be used again.
	 */

	cdata = spdk_nvme_ctrlr_get_data(ctrlr1);
	done = false;

	bdev_nvme_set_preferred_path(bdev->disk.name, cdata->cntlid, _set_preferred_path_cb, &done);

	poll_threads();
	CU_ASSERT(done == true);

	io_path = bdev_nvme_find_io_path(nbdev_ch);
	SPDK_CU_ASSERT_FATAL(io_path != NULL);

	CU_ASSERT(io_path->nvme_ns->ctrlr->ctrlr == ctrlr1);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);

	g_opts.disable_auto_failback = false;
}

static void
ut_set_multipath_policy_done(void *cb_arg, int rc)
{
	int *done = cb_arg;

	SPDK_CU_ASSERT_FATAL(done != NULL);
	*done = rc;
}

static void
test_set_multipath_policy(void)
{
	struct nvme_path_id path1 = {}, path2 = {};
	struct nvme_ctrlr_opts opts = {};
	struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2;
	struct nvme_bdev_ctrlr *nbdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_io_channel *ch;
	struct nvme_bdev_channel *nbdev_ch;
	struct spdk_uuid uuid1 = { .u.raw = { 0x1 } };
	int done;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&path1.trid);
	ut_init_trid2(&path2.trid);
	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	g_opts.disable_auto_failback = true;

	opts.ctrlr_loss_timeout_sec = -1;
	opts.reconnect_delay_sec = 1;

	set_thread(0);

	ctrlr1 = ut_attach_ctrlr(&path1.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL);

	ctrlr1->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path1.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, &opts, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
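	/* Advancing the mock clock lets the periodic attach poller complete on the
	 * following poll_threads() calls, matching the pattern used after every
	 * bdev_nvme_create() in this file.
	 */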
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	ctrlr2 = ut_attach_ctrlr(&path2.trid, 1, true, true);
	SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL);

	ctrlr2->ns[0].uuid = &uuid1;

	rc = bdev_nvme_create(&path2.trid, "nvme0", attached_names, STRING_SIZE,
			      attach_ctrlr_done, NULL, NULL, &opts, true);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();
	spdk_delay_us(g_opts.nvme_adminq_poll_period_us);
	poll_threads();

	nbdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr_get_bdev(nbdev_ctrlr, 1);
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	/* If multipath policy is updated before getting any I/O channel,
	 * a new I/O channel should have the update.
	 */
	done = -1;
	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE,
				       ut_set_multipath_policy_done, &done);
	poll_threads();
	CU_ASSERT(done == 0);

	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);

	ch = spdk_get_io_channel(bdev);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	nbdev_ch = spdk_io_channel_get_ctx(ch);

	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_ACTIVE);

	/* If multipath policy is updated while an I/O channel is active,
	 * the update should be applied to the I/O channel immediately.
	 */
	done = -1;
	bdev_nvme_set_multipath_policy(bdev->disk.name, BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE,
				       ut_set_multipath_policy_done, &done);
	poll_threads();
	CU_ASSERT(done == 0);

	CU_ASSERT(bdev->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);
	CU_ASSERT(nbdev_ch->mp_policy == BDEV_NVME_MP_POLICY_ACTIVE_PASSIVE);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", &g_any_path);
	CU_ASSERT(rc == 0);

	poll_threads();
	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL);
}

static void
test_uuid_generation(void)
{
	uint32_t nsid1 = 1, nsid2 = 2;
	char sn1[21] = "SPDK CTRLR SERIAL 01", sn2[21] = "SPDK CTRLR SERIAL 02";
	char sn3[21] = "                    ";
	char uuid_str[SPDK_UUID_STRING_LEN] = {'\0'};
	struct spdk_uuid uuid1, uuid2;

	/* Test case 1:
	 * Serial numbers are the same, nsids are different.
	 * Compare the two generated UUIDs - they should be different. */
	uuid1 = nvme_generate_uuid(sn1, nsid1);
	uuid2 = nvme_generate_uuid(sn1, nsid2);

	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);

	/* Test case 2:
	 * Serial numbers differ only by one character, nsids are the same.
	 * Compare the two generated UUIDs - they should be different. */
	uuid1 = nvme_generate_uuid(sn1, nsid1);
	uuid2 = nvme_generate_uuid(sn2, nsid1);

	CU_ASSERT((spdk_uuid_compare(&uuid1, &uuid2)) != 0);

	/* Test case 3:
	 * Serial number consists only of space characters.
	 * Validate the generated UUID.
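	 * Even this degenerate serial number must yield a UUID that
	 * spdk_uuid_fmt_lower() can format successfully.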
	 */
	uuid1 = nvme_generate_uuid(sn3, nsid1);
	CU_ASSERT((spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), &uuid1)) == 0);
}

int
main(int argc, const char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_race_between_failover_and_add_secondary_trid);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_add_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);
	CU_ADD_TEST(suite, test_init_ana_log_page);
	CU_ADD_TEST(suite, test_get_memory_domains);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_create_bdev_ctrlr);
	CU_ADD_TEST(suite, test_add_multi_ns_to_bdev);
	CU_ADD_TEST(suite, test_add_multi_io_paths_to_nbdev_ch);
	CU_ADD_TEST(suite, test_admin_path);
	CU_ADD_TEST(suite, test_reset_bdev_ctrlr);
	CU_ADD_TEST(suite, test_find_io_path);
	CU_ADD_TEST(suite, test_retry_io_if_ana_state_is_updating);
	CU_ADD_TEST(suite, test_retry_io_for_io_path_error);
	CU_ADD_TEST(suite, test_retry_io_count);
	CU_ADD_TEST(suite, test_concurrent_read_ana_log_page);
	CU_ADD_TEST(suite, test_retry_io_for_ana_error);
	CU_ADD_TEST(suite, test_check_io_error_resiliency_params);
	CU_ADD_TEST(suite, test_retry_io_if_ctrlr_is_resetting);
	CU_ADD_TEST(suite, test_reconnect_ctrlr);
	CU_ADD_TEST(suite, test_retry_failover_ctrlr);
	CU_ADD_TEST(suite, test_fail_path);
	CU_ADD_TEST(suite, test_nvme_ns_cmp);
	CU_ADD_TEST(suite, test_ana_transition);
	CU_ADD_TEST(suite, test_set_preferred_path);
	CU_ADD_TEST(suite, test_find_next_io_path);
	CU_ADD_TEST(suite, test_disable_auto_failback);
	CU_ADD_TEST(suite, test_set_multipath_policy);
	CU_ADD_TEST(suite, test_uuid_generation);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}