1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 12 * * Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * * Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in 16 * the documentation and/or other materials provided with the 17 * distribution. 18 * * Neither the name of Intel Corporation nor the names of its 19 * contributors may be used to endorse or promote products derived 20 * from this software without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include "spdk/stdinc.h" 36 #include "spdk_cunit.h" 37 #include "spdk/thread.h" 38 #include "spdk/bdev_module.h" 39 #include "spdk/bdev_module.h" 40 41 #include "common/lib/ut_multithread.c" 42 43 #include "bdev/nvme/bdev_nvme.c" 44 45 #include "unit/lib/json_mock.c" 46 47 static void *g_accel_p = (void *)0xdeadbeaf; 48 49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 50 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 51 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 52 spdk_nvme_remove_cb remove_cb), NULL); 53 54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 55 enum spdk_nvme_transport_type trtype)); 56 57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 58 NULL); 59 60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 61 62 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 63 struct spdk_nvme_transport_id *trid), 0); 64 65 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 66 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 67 68 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 69 70 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0); 71 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf)); 72 73 DEFINE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain, int); 74 75 int spdk_nvme_ctrlr_get_memory_domains(const struct spdk_nvme_ctrlr *ctrlr, 76 struct spdk_memory_domain **domains, int array_size) 77 { 78 HANDLE_RETURN_MOCK(spdk_nvme_ctrlr_get_memory_domain); 79 80 return 0; 81 } 82 83 struct spdk_io_channel * 84 spdk_accel_engine_get_io_channel(void) 85 { 86 return spdk_get_io_channel(g_accel_p); 87 } 88 89 void 90 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 91 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 92 { 93 /* Avoid warning that opts is used uninitialised */ 94 memset(opts, 0, opts_size); 95 } 96 97 DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t, 98 (const struct spdk_nvme_ctrlr *ctrlr), 0); 99 100 DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *, 101 (struct spdk_nvme_ctrlr *ctrlr), NULL); 102 103 DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr, 104 spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg)); 105 106 DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr, 107 uint64_t timeout_io_us, uint64_t timeout_admin_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg)); 108 109 DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false); 110 111 DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr, 112 struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 113 114 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr, 115 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 116 uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 117 118 DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr, 119 struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf, 120 uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 121 122 DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 123 124 DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 125 126 DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 127 128 DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0); 129 130 DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false); 131 132 DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0); 133 134 DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value, 135 enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0); 136 137 DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0); 138 139 DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi, 140 (const struct spdk_nvme_ns *ns), 0); 141 142 DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, 143 char *name, size_t *size), 0); 144 145 DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t, 146 (struct spdk_nvme_ns *ns), 0); 147 148 DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t, 149 (const struct spdk_nvme_ctrlr *ctrlr), 0); 150 151 DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t, 152 (struct spdk_nvme_ns *ns), 0); 153 154 DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t, 155 (struct spdk_nvme_ns *ns), 0); 156 157 DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t, 158 (struct spdk_nvme_ns *ns), 0); 159 160 DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int, 161 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata, 162 uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 163 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0); 164 165 DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int, 166 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba, 167 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 168 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn, 169 void *metadata, uint16_t apptag_mask, uint16_t apptag), 0); 170 171 DEFINE_STUB(spdk_nvme_zns_report_zones, int, 172 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 173 void *payload, uint32_t payload_size, uint64_t slba, 174 enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report, 175 spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 176 177 DEFINE_STUB(spdk_nvme_zns_close_zone, int, 178 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 179 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 180 181 DEFINE_STUB(spdk_nvme_zns_finish_zone, int, 182 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 183 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 184 185 DEFINE_STUB(spdk_nvme_zns_open_zone, int, 186 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 187 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 188 189 DEFINE_STUB(spdk_nvme_zns_reset_zone, int, 190 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 191 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 192 193 DEFINE_STUB(spdk_nvme_ns_get_nguid, const uint8_t *, (const struct spdk_nvme_ns *ns), NULL); 194 195 DEFINE_STUB(spdk_nvme_zns_offline_zone, int, 196 (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba, 197 bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0); 198 199 DEFINE_STUB_V(spdk_bdev_module_fini_done, (void)); 200 201 DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module)); 202 203 DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL); 204 205 DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev)); 206 207 DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst, 208 struct iovec *iov, 209 uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0); 210 211 DEFINE_STUB_V(spdk_nvme_ctrlr_prepare_for_reset, (struct spdk_nvme_ctrlr *ctrlr)); 212 213 struct ut_nvme_req { 214 uint16_t opc; 215 spdk_nvme_cmd_cb cb_fn; 216 void *cb_arg; 217 struct spdk_nvme_cpl cpl; 218 TAILQ_ENTRY(ut_nvme_req) tailq; 219 }; 220 221 struct spdk_nvme_ns { 222 struct spdk_nvme_ctrlr *ctrlr; 223 uint32_t id; 224 bool is_active; 225 struct spdk_uuid uuid; 226 enum spdk_nvme_ana_state ana_state; 227 }; 228 229 struct spdk_nvme_qpair { 230 struct spdk_nvme_ctrlr *ctrlr; 231 bool is_connected; 232 TAILQ_HEAD(, ut_nvme_req) outstanding_reqs; 233 uint32_t num_outstanding_reqs; 234 TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq; 235 struct spdk_nvme_poll_group *poll_group; 236 TAILQ_ENTRY(spdk_nvme_qpair) tailq; 237 }; 238 239 struct spdk_nvme_ctrlr { 240 uint32_t num_ns; 241 struct spdk_nvme_ns *ns; 242 struct spdk_nvme_ns_data *nsdata; 243 struct spdk_nvme_qpair adminq; 244 struct spdk_nvme_ctrlr_data cdata; 245 bool attached; 246 bool is_failed; 247 bool fail_reset; 248 struct spdk_nvme_transport_id trid; 249 TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs; 250 TAILQ_ENTRY(spdk_nvme_ctrlr) tailq; 251 struct spdk_nvme_ctrlr_opts opts; 252 }; 253 254 struct spdk_nvme_poll_group { 255 void *ctx; 256 struct spdk_nvme_accel_fn_table accel_fn_table; 257 TAILQ_HEAD(, spdk_nvme_qpair) qpairs; 258 }; 259 260 struct spdk_nvme_probe_ctx { 261 struct spdk_nvme_transport_id trid; 262 void *cb_ctx; 263 spdk_nvme_attach_cb attach_cb; 264 struct spdk_nvme_ctrlr *init_ctrlr; 265 }; 266 267 struct spdk_nvme_ctrlr_reset_ctx { 268 struct spdk_nvme_ctrlr *ctrlr; 269 }; 270 271 uint32_t 272 spdk_nvme_ctrlr_get_first_active_ns(struct spdk_nvme_ctrlr *ctrlr) 273 { 274 uint32_t nsid; 275 276 for (nsid = 1; nsid <= ctrlr->num_ns; nsid++) { 277 if (ctrlr->ns[nsid - 1].is_active) { 278 return nsid; 279 } 280 } 281 282 return 0; 283 } 284 285 uint32_t 286 spdk_nvme_ctrlr_get_next_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 287 { 288 for (nsid = nsid + 1; nsid <= ctrlr->num_ns; nsid++) { 289 if (ctrlr->ns[nsid - 1].is_active) { 290 return nsid; 291 } 292 } 293 294 return 0; 295 } 296 297 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs); 298 static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER( 299 g_ut_attached_ctrlrs); 300 static int g_ut_attach_ctrlr_status; 301 static size_t g_ut_attach_bdev_count; 302 static int g_ut_register_bdev_status; 303 static uint16_t g_ut_cntlid; 304 static struct spdk_nvme_transport_id g_any_trid = {}; 305 306 static void 307 ut_init_trid(struct spdk_nvme_transport_id *trid) 308 { 309 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 310 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 311 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 312 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 313 } 314 315 static void 316 ut_init_trid2(struct spdk_nvme_transport_id *trid) 317 { 318 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 319 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 320 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 321 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 322 } 323 324 static void 325 ut_init_trid3(struct spdk_nvme_transport_id *trid) 326 { 327 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 328 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 329 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 330 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 331 } 332 333 static int 334 cmp_int(int a, int b) 335 { 336 return a - b; 337 } 338 339 int 340 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 341 const struct spdk_nvme_transport_id *trid2) 342 { 343 int cmp; 344 345 /* We assume trtype is TCP for now. */ 346 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 347 348 cmp = cmp_int(trid1->trtype, trid2->trtype); 349 if (cmp) { 350 return cmp; 351 } 352 353 cmp = strcasecmp(trid1->traddr, trid2->traddr); 354 if (cmp) { 355 return cmp; 356 } 357 358 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 359 if (cmp) { 360 return cmp; 361 } 362 363 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 364 if (cmp) { 365 return cmp; 366 } 367 368 cmp = strcmp(trid1->subnqn, trid2->subnqn); 369 if (cmp) { 370 return cmp; 371 } 372 373 return 0; 374 } 375 376 static struct spdk_nvme_ctrlr * 377 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns, 378 bool ana_reporting, bool multi_ctrlr) 379 { 380 struct spdk_nvme_ctrlr *ctrlr; 381 uint32_t i; 382 383 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 384 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 385 /* There is a ctrlr whose trid matches. */ 386 return NULL; 387 } 388 } 389 390 ctrlr = calloc(1, sizeof(*ctrlr)); 391 if (ctrlr == NULL) { 392 return NULL; 393 } 394 395 ctrlr->attached = true; 396 ctrlr->adminq.ctrlr = ctrlr; 397 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 398 399 if (num_ns != 0) { 400 ctrlr->num_ns = num_ns; 401 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 402 if (ctrlr->ns == NULL) { 403 free(ctrlr); 404 return NULL; 405 } 406 407 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 408 if (ctrlr->nsdata == NULL) { 409 free(ctrlr->ns); 410 free(ctrlr); 411 return NULL; 412 } 413 414 for (i = 0; i < num_ns; i++) { 415 ctrlr->ns[i].id = i + 1; 416 ctrlr->ns[i].ctrlr = ctrlr; 417 ctrlr->ns[i].is_active = true; 418 ctrlr->ns[i].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 419 ctrlr->nsdata[i].nsze = 1024; 420 } 421 422 ctrlr->cdata.nn = num_ns; 423 ctrlr->cdata.nanagrpid = num_ns; 424 } 425 426 ctrlr->cdata.cntlid = ++g_ut_cntlid; 427 ctrlr->cdata.cmic.multi_ctrlr = multi_ctrlr; 428 ctrlr->cdata.cmic.ana_reporting = ana_reporting; 429 ctrlr->trid = *trid; 430 TAILQ_INIT(&ctrlr->active_io_qpairs); 431 432 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 433 434 return ctrlr; 435 } 436 437 static void 438 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 439 { 440 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 441 442 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, tailq); 443 free(ctrlr->nsdata); 444 free(ctrlr->ns); 445 free(ctrlr); 446 } 447 448 static int 449 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 450 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 451 { 452 struct ut_nvme_req *req; 453 454 req = calloc(1, sizeof(*req)); 455 if (req == NULL) { 456 return -ENOMEM; 457 } 458 459 req->opc = opc; 460 req->cb_fn = cb_fn; 461 req->cb_arg = cb_arg; 462 463 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 464 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 465 466 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 467 qpair->num_outstanding_reqs++; 468 469 return 0; 470 } 471 472 static struct spdk_bdev_io * 473 ut_alloc_bdev_io(enum spdk_bdev_io_type type, struct nvme_bdev *nbdev, 474 struct spdk_io_channel *ch) 475 { 476 struct spdk_bdev_io *bdev_io; 477 478 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 479 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 480 bdev_io->type = type; 481 bdev_io->bdev = &nbdev->disk; 482 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 483 484 return bdev_io; 485 } 486 487 static void 488 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 489 { 490 bdev_io->u.bdev.iovs = &bdev_io->iov; 491 bdev_io->u.bdev.iovcnt = 1; 492 493 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 494 bdev_io->iov.iov_len = 4096; 495 } 496 497 static void 498 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 499 { 500 if (ctrlr->is_failed) { 501 free(ctrlr); 502 return; 503 } 504 505 spdk_nvme_ctrlr_get_default_ctrlr_opts(&ctrlr->opts, sizeof(ctrlr->opts)); 506 if (probe_ctx->cb_ctx) { 507 ctrlr->opts = *(struct spdk_nvme_ctrlr_opts *)probe_ctx->cb_ctx; 508 } 509 510 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 511 512 if (probe_ctx->attach_cb) { 513 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 514 } 515 } 516 517 int 518 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 519 { 520 struct spdk_nvme_ctrlr *ctrlr, *tmp; 521 522 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 523 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 524 continue; 525 } 526 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 527 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 528 } 529 530 free(probe_ctx); 531 532 return 0; 533 } 534 535 struct spdk_nvme_probe_ctx * 536 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 537 const struct spdk_nvme_ctrlr_opts *opts, 538 spdk_nvme_attach_cb attach_cb) 539 { 540 struct spdk_nvme_probe_ctx *probe_ctx; 541 542 if (trid == NULL) { 543 return NULL; 544 } 545 546 probe_ctx = calloc(1, sizeof(*probe_ctx)); 547 if (probe_ctx == NULL) { 548 return NULL; 549 } 550 551 probe_ctx->trid = *trid; 552 probe_ctx->cb_ctx = (void *)opts; 553 probe_ctx->attach_cb = attach_cb; 554 555 return probe_ctx; 556 } 557 558 int 559 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 560 { 561 if (ctrlr->attached) { 562 ut_detach_ctrlr(ctrlr); 563 } 564 565 return 0; 566 } 567 568 void 569 spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) 570 { 571 memset(opts, 0, opts_size); 572 573 snprintf(opts->hostnqn, sizeof(opts->hostnqn), 574 "nqn.2014-08.org.nvmexpress:uuid:7391e776-0716-11ec-9a03-0242ac130003"); 575 } 576 577 const struct spdk_nvme_ctrlr_data * 578 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 579 { 580 return &ctrlr->cdata; 581 } 582 583 uint32_t 584 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 585 { 586 return ctrlr->num_ns; 587 } 588 589 struct spdk_nvme_ns * 590 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 591 { 592 if (nsid < 1 || nsid > ctrlr->num_ns) { 593 return NULL; 594 } 595 596 return &ctrlr->ns[nsid - 1]; 597 } 598 599 bool 600 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 601 { 602 if (nsid < 1 || nsid > ctrlr->num_ns) { 603 return false; 604 } 605 606 return ctrlr->ns[nsid - 1].is_active; 607 } 608 609 union spdk_nvme_csts_register 610 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 611 { 612 union spdk_nvme_csts_register csts; 613 614 csts.raw = 0; 615 616 return csts; 617 } 618 619 union spdk_nvme_vs_register 620 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 621 { 622 union spdk_nvme_vs_register vs; 623 624 vs.raw = 0; 625 626 return vs; 627 } 628 629 struct spdk_nvme_qpair * 630 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 631 const struct spdk_nvme_io_qpair_opts *user_opts, 632 size_t opts_size) 633 { 634 struct spdk_nvme_qpair *qpair; 635 636 qpair = calloc(1, sizeof(*qpair)); 637 if (qpair == NULL) { 638 return NULL; 639 } 640 641 qpair->ctrlr = ctrlr; 642 TAILQ_INIT(&qpair->outstanding_reqs); 643 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 644 645 return qpair; 646 } 647 648 int 649 spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 650 struct spdk_nvme_qpair *qpair) 651 { 652 if (qpair->is_connected) { 653 return -EISCONN; 654 } 655 656 qpair->is_connected = true; 657 658 return 0; 659 } 660 661 int 662 spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair) 663 { 664 struct spdk_nvme_ctrlr *ctrlr; 665 666 ctrlr = qpair->ctrlr; 667 668 if (ctrlr->is_failed) { 669 return -ENXIO; 670 } 671 qpair->is_connected = true; 672 673 return 0; 674 } 675 676 void 677 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 678 { 679 qpair->is_connected = false; 680 } 681 682 int 683 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 684 { 685 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 686 687 qpair->is_connected = false; 688 689 if (qpair->poll_group != NULL) { 690 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 691 } 692 693 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 694 695 CU_ASSERT(qpair->num_outstanding_reqs == 0); 696 697 free(qpair); 698 699 return 0; 700 } 701 702 int 703 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr) 704 { 705 if (ctrlr->fail_reset) { 706 return -EIO; 707 } 708 709 ctrlr->is_failed = false; 710 711 return 0; 712 } 713 714 int 715 spdk_nvme_ctrlr_reset_poll_async(struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx) 716 { 717 struct spdk_nvme_ctrlr *ctrlr = ctrlr_reset_ctx->ctrlr; 718 719 free(ctrlr_reset_ctx); 720 return spdk_nvme_ctrlr_reset(ctrlr); 721 } 722 723 int 724 spdk_nvme_ctrlr_reset_async(struct spdk_nvme_ctrlr *ctrlr, 725 struct spdk_nvme_ctrlr_reset_ctx **reset_ctx) 726 { 727 struct spdk_nvme_ctrlr_reset_ctx *ctrlr_reset_ctx; 728 729 ctrlr_reset_ctx = calloc(1, sizeof(*ctrlr_reset_ctx)); 730 if (!ctrlr_reset_ctx) { 731 return -ENOMEM; 732 } 733 734 ctrlr_reset_ctx->ctrlr = ctrlr; 735 *reset_ctx = ctrlr_reset_ctx; 736 737 return 0; 738 } 739 740 void 741 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 742 { 743 ctrlr->is_failed = true; 744 } 745 746 #define UT_ANA_DESC_SIZE (sizeof(struct spdk_nvme_ana_group_descriptor) + \ 747 sizeof(uint32_t)) 748 static void 749 ut_create_ana_log_page(struct spdk_nvme_ctrlr *ctrlr, char *buf, uint32_t length) 750 { 751 struct spdk_nvme_ana_page ana_hdr; 752 char _ana_desc[UT_ANA_DESC_SIZE]; 753 struct spdk_nvme_ana_group_descriptor *ana_desc; 754 struct spdk_nvme_ns *ns; 755 uint32_t i; 756 757 memset(&ana_hdr, 0, sizeof(ana_hdr)); 758 ana_hdr.num_ana_group_desc = ctrlr->num_ns; 759 760 SPDK_CU_ASSERT_FATAL(sizeof(ana_hdr) <= length); 761 memcpy(buf, (char *)&ana_hdr, sizeof(ana_hdr)); 762 763 buf += sizeof(ana_hdr); 764 length -= sizeof(ana_hdr); 765 766 ana_desc = (struct spdk_nvme_ana_group_descriptor *)_ana_desc; 767 768 for (i = 0; i < ctrlr->num_ns; i++) { 769 ns = &ctrlr->ns[i]; 770 771 if (!ns->is_active) { 772 continue; 773 } 774 775 memset(ana_desc, 0, UT_ANA_DESC_SIZE); 776 777 ana_desc->ana_group_id = ns->id; 778 ana_desc->num_of_nsid = 1; 779 ana_desc->ana_state = ns->ana_state; 780 ana_desc->nsid[0] = ns->id; 781 782 SPDK_CU_ASSERT_FATAL(UT_ANA_DESC_SIZE <= length); 783 memcpy(buf, (char *)ana_desc, UT_ANA_DESC_SIZE); 784 785 buf += UT_ANA_DESC_SIZE; 786 length -= UT_ANA_DESC_SIZE; 787 } 788 } 789 790 int 791 spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, 792 uint8_t log_page, uint32_t nsid, 793 void *payload, uint32_t payload_size, 794 uint64_t offset, 795 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 796 { 797 if (log_page == SPDK_NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS) { 798 SPDK_CU_ASSERT_FATAL(offset == 0); 799 ut_create_ana_log_page(ctrlr, payload, payload_size); 800 } 801 802 return ut_submit_nvme_request(NULL, &ctrlr->adminq, SPDK_NVME_OPC_GET_LOG_PAGE, 803 cb_fn, cb_arg); 804 } 805 806 int 807 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 808 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 809 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 810 { 811 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 812 } 813 814 int 815 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 816 void *cmd_cb_arg, 817 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 818 { 819 struct ut_nvme_req *req = NULL, *abort_req; 820 821 if (qpair == NULL) { 822 qpair = &ctrlr->adminq; 823 } 824 825 abort_req = calloc(1, sizeof(*abort_req)); 826 if (abort_req == NULL) { 827 return -ENOMEM; 828 } 829 830 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 831 if (req->cb_arg == cmd_cb_arg) { 832 break; 833 } 834 } 835 836 if (req == NULL) { 837 free(abort_req); 838 return -ENOENT; 839 } 840 841 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 842 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 843 844 abort_req->opc = SPDK_NVME_OPC_ABORT; 845 abort_req->cb_fn = cb_fn; 846 abort_req->cb_arg = cb_arg; 847 848 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 849 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 850 abort_req->cpl.cdw0 = 0; 851 852 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 853 ctrlr->adminq.num_outstanding_reqs++; 854 855 return 0; 856 } 857 858 int32_t 859 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 860 { 861 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 862 } 863 864 uint32_t 865 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 866 { 867 return ns->id; 868 } 869 870 struct spdk_nvme_ctrlr * 871 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 872 { 873 return ns->ctrlr; 874 } 875 876 static inline struct spdk_nvme_ns_data * 877 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 878 { 879 return &ns->ctrlr->nsdata[ns->id - 1]; 880 } 881 882 const struct spdk_nvme_ns_data * 883 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 884 { 885 return _nvme_ns_get_data(ns); 886 } 887 888 uint64_t 889 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 890 { 891 return _nvme_ns_get_data(ns)->nsze; 892 } 893 894 const struct spdk_uuid * 895 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 896 { 897 return &ns->uuid; 898 } 899 900 int 901 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 902 void *metadata, uint64_t lba, uint32_t lba_count, 903 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 904 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 905 { 906 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 907 } 908 909 int 910 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 911 void *buffer, void *metadata, uint64_t lba, 912 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 913 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 914 { 915 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 916 } 917 918 int 919 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 920 uint64_t lba, uint32_t lba_count, 921 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 922 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 923 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 924 uint16_t apptag_mask, uint16_t apptag) 925 { 926 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 927 } 928 929 int 930 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 931 uint64_t lba, uint32_t lba_count, 932 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 933 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 934 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 935 uint16_t apptag_mask, uint16_t apptag) 936 { 937 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 938 } 939 940 static bool g_ut_readv_ext_called; 941 int 942 spdk_nvme_ns_cmd_readv_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 943 uint64_t lba, uint32_t lba_count, 944 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 945 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 946 spdk_nvme_req_next_sge_cb next_sge_fn, 947 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 948 { 949 g_ut_readv_ext_called = true; 950 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 951 } 952 953 static bool g_ut_writev_ext_called; 954 int 955 spdk_nvme_ns_cmd_writev_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 956 uint64_t lba, uint32_t lba_count, 957 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 958 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 959 spdk_nvme_req_next_sge_cb next_sge_fn, 960 struct spdk_nvme_ns_cmd_ext_io_opts *opts) 961 { 962 g_ut_writev_ext_called = true; 963 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 964 } 965 966 int 967 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 968 uint64_t lba, uint32_t lba_count, 969 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 970 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 971 spdk_nvme_req_next_sge_cb next_sge_fn, 972 void *metadata, uint16_t apptag_mask, uint16_t apptag) 973 { 974 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 975 } 976 977 int 978 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 979 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 980 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 981 { 982 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 983 } 984 985 int 986 spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 987 uint64_t lba, uint32_t lba_count, 988 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 989 uint32_t io_flags) 990 { 991 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE_ZEROES, cb_fn, cb_arg); 992 } 993 994 struct spdk_nvme_poll_group * 995 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 996 { 997 struct spdk_nvme_poll_group *group; 998 999 group = calloc(1, sizeof(*group)); 1000 if (group == NULL) { 1001 return NULL; 1002 } 1003 1004 group->ctx = ctx; 1005 if (table != NULL) { 1006 group->accel_fn_table = *table; 1007 } 1008 TAILQ_INIT(&group->qpairs); 1009 1010 return group; 1011 } 1012 1013 int 1014 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 1015 { 1016 if (!TAILQ_EMPTY(&group->qpairs)) { 1017 return -EBUSY; 1018 } 1019 1020 free(group); 1021 1022 return 0; 1023 } 1024 1025 int32_t 1026 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 1027 uint32_t max_completions) 1028 { 1029 struct ut_nvme_req *req, *tmp; 1030 uint32_t num_completions = 0; 1031 1032 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 1033 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 1034 qpair->num_outstanding_reqs--; 1035 1036 req->cb_fn(req->cb_arg, &req->cpl); 1037 1038 free(req); 1039 num_completions++; 1040 } 1041 1042 return num_completions; 1043 } 1044 1045 int64_t 1046 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 1047 uint32_t completions_per_qpair, 1048 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 1049 { 1050 struct spdk_nvme_qpair *qpair, *tmp_qpair; 1051 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 1052 1053 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 1054 1055 if (disconnected_qpair_cb == NULL) { 1056 return -EINVAL; 1057 } 1058 1059 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) { 1060 if (qpair->is_connected) { 1061 local_completions = spdk_nvme_qpair_process_completions(qpair, 1062 completions_per_qpair); 1063 if (local_completions < 0 && error_reason == 0) { 1064 error_reason = local_completions; 1065 } else { 1066 num_completions += local_completions; 1067 assert(num_completions >= 0); 1068 } 1069 } 1070 } 1071 1072 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) { 1073 if (!qpair->is_connected) { 1074 disconnected_qpair_cb(qpair, group->ctx); 1075 } 1076 } 1077 1078 return error_reason ? error_reason : num_completions; 1079 } 1080 1081 int 1082 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 1083 struct spdk_nvme_qpair *qpair) 1084 { 1085 CU_ASSERT(!qpair->is_connected); 1086 1087 qpair->poll_group = group; 1088 TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq); 1089 1090 return 0; 1091 } 1092 1093 int 1094 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 1095 struct spdk_nvme_qpair *qpair) 1096 { 1097 CU_ASSERT(!qpair->is_connected); 1098 1099 TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq); 1100 1101 return 0; 1102 } 1103 1104 int 1105 spdk_bdev_register(struct spdk_bdev *bdev) 1106 { 1107 return g_ut_register_bdev_status; 1108 } 1109 1110 void 1111 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 1112 { 1113 int rc; 1114 1115 rc = bdev->fn_table->destruct(bdev->ctxt); 1116 if (rc <= 0 && cb_fn != NULL) { 1117 cb_fn(cb_arg, rc); 1118 } 1119 } 1120 1121 int 1122 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 1123 { 1124 bdev->blockcnt = size; 1125 1126 return 0; 1127 } 1128 1129 struct spdk_io_channel * 1130 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 1131 { 1132 return (struct spdk_io_channel *)bdev_io->internal.ch; 1133 } 1134 1135 void 1136 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 1137 { 1138 bdev_io->internal.status = status; 1139 bdev_io->internal.in_submit_request = false; 1140 } 1141 1142 void 1143 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 1144 { 1145 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 1146 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 1147 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_ABORTED_BY_REQUEST) { 1148 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 1149 } else { 1150 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 1151 } 1152 1153 bdev_io->internal.error.nvme.cdw0 = cdw0; 1154 bdev_io->internal.error.nvme.sct = sct; 1155 bdev_io->internal.error.nvme.sc = sc; 1156 1157 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 1158 } 1159 1160 void 1161 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 1162 { 1163 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 1164 1165 ut_bdev_io_set_buf(bdev_io); 1166 1167 cb(ch, bdev_io, true); 1168 } 1169 1170 static void 1171 test_create_ctrlr(void) 1172 { 1173 struct spdk_nvme_transport_id trid = {}; 1174 struct spdk_nvme_ctrlr ctrlr = {}; 1175 int rc; 1176 1177 ut_init_trid(&trid); 1178 1179 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 1180 CU_ASSERT(rc == 0); 1181 1182 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1183 1184 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1185 CU_ASSERT(rc == 0); 1186 1187 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") != NULL); 1188 1189 poll_threads(); 1190 1191 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1192 } 1193 1194 static void 1195 test_reset_ctrlr(void) 1196 { 1197 struct spdk_nvme_transport_id trid = {}; 1198 struct spdk_nvme_ctrlr ctrlr = {}; 1199 struct nvme_ctrlr *nvme_ctrlr = NULL; 1200 struct nvme_ctrlr_trid *curr_trid; 1201 struct spdk_io_channel *ch1, *ch2; 1202 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1203 int rc; 1204 1205 ut_init_trid(&trid); 1206 TAILQ_INIT(&ctrlr.active_io_qpairs); 1207 1208 set_thread(0); 1209 1210 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 1211 CU_ASSERT(rc == 0); 1212 1213 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1214 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1215 1216 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1217 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1218 1219 ch1 = spdk_get_io_channel(nvme_ctrlr); 1220 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1221 1222 ctrlr_ch1 = spdk_io_channel_get_ctx(ch1); 1223 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1224 1225 set_thread(1); 1226 1227 ch2 = spdk_get_io_channel(nvme_ctrlr); 1228 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1229 1230 ctrlr_ch2 = spdk_io_channel_get_ctx(ch2); 1231 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1232 1233 /* Reset starts from thread 1. */ 1234 set_thread(1); 1235 1236 /* Case 1: ctrlr is already being destructed. */ 1237 nvme_ctrlr->destruct = true; 1238 1239 rc = bdev_nvme_reset(nvme_ctrlr); 1240 CU_ASSERT(rc == -ENXIO); 1241 1242 /* Case 2: reset is in progress. */ 1243 nvme_ctrlr->destruct = false; 1244 nvme_ctrlr->resetting = true; 1245 1246 rc = bdev_nvme_reset(nvme_ctrlr); 1247 CU_ASSERT(rc == -EBUSY); 1248 1249 /* Case 3: reset completes successfully. */ 1250 nvme_ctrlr->resetting = false; 1251 curr_trid->is_failed = true; 1252 ctrlr.is_failed = true; 1253 1254 rc = bdev_nvme_reset(nvme_ctrlr); 1255 CU_ASSERT(rc == 0); 1256 CU_ASSERT(nvme_ctrlr->resetting == true); 1257 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1258 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1259 1260 poll_thread_times(0, 1); 1261 CU_ASSERT(ctrlr_ch1->qpair == NULL); 1262 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1263 1264 poll_thread_times(1, 1); 1265 CU_ASSERT(ctrlr_ch1->qpair == NULL); 1266 CU_ASSERT(ctrlr_ch2->qpair == NULL); 1267 CU_ASSERT(ctrlr.is_failed == true); 1268 1269 poll_thread_times(1, 1); 1270 CU_ASSERT(ctrlr.is_failed == false); 1271 1272 poll_thread_times(0, 1); 1273 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1274 CU_ASSERT(ctrlr_ch2->qpair == NULL); 1275 1276 poll_thread_times(1, 1); 1277 CU_ASSERT(ctrlr_ch1->qpair != NULL); 1278 CU_ASSERT(ctrlr_ch2->qpair != NULL); 1279 CU_ASSERT(nvme_ctrlr->resetting == true); 1280 CU_ASSERT(curr_trid->is_failed == true); 1281 1282 poll_thread_times(1, 1); 1283 CU_ASSERT(nvme_ctrlr->resetting == false); 1284 CU_ASSERT(curr_trid->is_failed == false); 1285 1286 spdk_put_io_channel(ch2); 1287 1288 set_thread(0); 1289 1290 spdk_put_io_channel(ch1); 1291 1292 poll_threads(); 1293 1294 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1295 CU_ASSERT(rc == 0); 1296 1297 poll_threads(); 1298 1299 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1300 } 1301 1302 static void 1303 test_race_between_reset_and_destruct_ctrlr(void) 1304 { 1305 struct spdk_nvme_transport_id trid = {}; 1306 struct spdk_nvme_ctrlr ctrlr = {}; 1307 struct nvme_ctrlr *nvme_ctrlr; 1308 struct spdk_io_channel *ch1, *ch2; 1309 int rc; 1310 1311 ut_init_trid(&trid); 1312 TAILQ_INIT(&ctrlr.active_io_qpairs); 1313 1314 set_thread(0); 1315 1316 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 1317 CU_ASSERT(rc == 0); 1318 1319 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1320 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1321 1322 ch1 = spdk_get_io_channel(nvme_ctrlr); 1323 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1324 1325 set_thread(1); 1326 1327 ch2 = spdk_get_io_channel(nvme_ctrlr); 1328 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1329 1330 /* Reset starts from thread 1. */ 1331 set_thread(1); 1332 1333 rc = bdev_nvme_reset(nvme_ctrlr); 1334 CU_ASSERT(rc == 0); 1335 CU_ASSERT(nvme_ctrlr->resetting == true); 1336 1337 /* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */ 1338 set_thread(0); 1339 1340 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1341 CU_ASSERT(rc == 0); 1342 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1343 CU_ASSERT(nvme_ctrlr->destruct == true); 1344 CU_ASSERT(nvme_ctrlr->resetting == true); 1345 1346 poll_threads(); 1347 1348 /* Reset completed but ctrlr is not still destructed yet. */ 1349 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1350 CU_ASSERT(nvme_ctrlr->destruct == true); 1351 CU_ASSERT(nvme_ctrlr->resetting == false); 1352 1353 /* New reset request is rejected. */ 1354 rc = bdev_nvme_reset(nvme_ctrlr); 1355 CU_ASSERT(rc == -ENXIO); 1356 1357 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1358 * However there are two channels and destruct is not completed yet. 1359 */ 1360 poll_threads(); 1361 1362 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 1363 1364 set_thread(0); 1365 1366 spdk_put_io_channel(ch1); 1367 1368 set_thread(1); 1369 1370 spdk_put_io_channel(ch2); 1371 1372 poll_threads(); 1373 1374 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1375 } 1376 1377 static void 1378 test_failover_ctrlr(void) 1379 { 1380 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1381 struct spdk_nvme_ctrlr ctrlr = {}; 1382 struct nvme_ctrlr *nvme_ctrlr = NULL; 1383 struct nvme_ctrlr_trid *curr_trid, *next_trid; 1384 struct spdk_io_channel *ch1, *ch2; 1385 int rc; 1386 1387 ut_init_trid(&trid1); 1388 ut_init_trid2(&trid2); 1389 TAILQ_INIT(&ctrlr.active_io_qpairs); 1390 1391 set_thread(0); 1392 1393 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); 1394 CU_ASSERT(rc == 0); 1395 1396 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1397 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1398 1399 ch1 = spdk_get_io_channel(nvme_ctrlr); 1400 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1401 1402 set_thread(1); 1403 1404 ch2 = spdk_get_io_channel(nvme_ctrlr); 1405 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1406 1407 /* First, test one trid case. */ 1408 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1409 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1410 1411 /* Failover starts from thread 1. */ 1412 set_thread(1); 1413 1414 /* Case 1: ctrlr is already being destructed. */ 1415 nvme_ctrlr->destruct = true; 1416 1417 rc = bdev_nvme_failover(nvme_ctrlr, false); 1418 CU_ASSERT(rc == -ENXIO); 1419 CU_ASSERT(curr_trid->is_failed == false); 1420 1421 /* Case 2: reset is in progress. */ 1422 nvme_ctrlr->destruct = false; 1423 nvme_ctrlr->resetting = true; 1424 1425 rc = bdev_nvme_failover(nvme_ctrlr, false); 1426 CU_ASSERT(rc == 0); 1427 1428 /* Case 3: failover is in progress. */ 1429 nvme_ctrlr->failover_in_progress = true; 1430 1431 rc = bdev_nvme_failover(nvme_ctrlr, false); 1432 CU_ASSERT(rc == 0); 1433 CU_ASSERT(curr_trid->is_failed == false); 1434 1435 /* Case 4: reset completes successfully. */ 1436 nvme_ctrlr->resetting = false; 1437 nvme_ctrlr->failover_in_progress = false; 1438 1439 rc = bdev_nvme_failover(nvme_ctrlr, false); 1440 CU_ASSERT(rc == 0); 1441 1442 CU_ASSERT(nvme_ctrlr->resetting == true); 1443 CU_ASSERT(curr_trid->is_failed == true); 1444 1445 poll_threads(); 1446 1447 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1448 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1449 1450 CU_ASSERT(nvme_ctrlr->resetting == false); 1451 CU_ASSERT(curr_trid->is_failed == false); 1452 1453 set_thread(0); 1454 1455 /* Second, test two trids case. */ 1456 rc = bdev_nvme_add_secondary_trid(nvme_ctrlr, &ctrlr, &trid2); 1457 CU_ASSERT(rc == 0); 1458 1459 curr_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1460 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1461 CU_ASSERT(curr_trid == nvme_ctrlr->connected_trid); 1462 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1463 1464 /* Failover starts from thread 1. */ 1465 set_thread(1); 1466 1467 /* Case 5: reset is in progress. */ 1468 nvme_ctrlr->resetting = true; 1469 1470 rc = bdev_nvme_failover(nvme_ctrlr, false); 1471 CU_ASSERT(rc == -EBUSY); 1472 1473 /* Case 5: failover is in progress. */ 1474 nvme_ctrlr->failover_in_progress = true; 1475 1476 rc = bdev_nvme_failover(nvme_ctrlr, false); 1477 CU_ASSERT(rc == 0); 1478 1479 /* Case 6: failover completes successfully. */ 1480 nvme_ctrlr->resetting = false; 1481 nvme_ctrlr->failover_in_progress = false; 1482 1483 rc = bdev_nvme_failover(nvme_ctrlr, false); 1484 CU_ASSERT(rc == 0); 1485 1486 CU_ASSERT(nvme_ctrlr->resetting == true); 1487 CU_ASSERT(nvme_ctrlr->failover_in_progress == true); 1488 1489 next_trid = TAILQ_FIRST(&nvme_ctrlr->trids); 1490 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1491 CU_ASSERT(next_trid != curr_trid); 1492 CU_ASSERT(next_trid == nvme_ctrlr->connected_trid); 1493 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1494 1495 poll_threads(); 1496 1497 CU_ASSERT(nvme_ctrlr->resetting == false); 1498 CU_ASSERT(nvme_ctrlr->failover_in_progress == false); 1499 1500 spdk_put_io_channel(ch2); 1501 1502 set_thread(0); 1503 1504 spdk_put_io_channel(ch1); 1505 1506 poll_threads(); 1507 1508 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1509 CU_ASSERT(rc == 0); 1510 1511 poll_threads(); 1512 1513 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1514 } 1515 1516 static void 1517 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1518 { 1519 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1520 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1521 } 1522 1523 static void 1524 test_pending_reset(void) 1525 { 1526 struct spdk_nvme_transport_id trid = {}; 1527 struct spdk_nvme_ctrlr *ctrlr; 1528 struct nvme_ctrlr *nvme_ctrlr = NULL; 1529 const int STRING_SIZE = 32; 1530 const char *attached_names[STRING_SIZE]; 1531 struct nvme_bdev *bdev; 1532 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1533 struct spdk_io_channel *ch1, *ch2; 1534 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 1535 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 1536 int rc; 1537 1538 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1539 ut_init_trid(&trid); 1540 1541 set_thread(0); 1542 1543 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1544 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1545 1546 g_ut_attach_ctrlr_status = 0; 1547 g_ut_attach_bdev_count = 1; 1548 1549 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 1550 attach_ctrlr_done, NULL, NULL, false); 1551 CU_ASSERT(rc == 0); 1552 1553 spdk_delay_us(1000); 1554 poll_threads(); 1555 1556 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1557 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1558 1559 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1560 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1561 1562 ch1 = spdk_get_io_channel(bdev); 1563 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1564 1565 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 1566 ctrlr_ch1 = nbdev_ch1->ctrlr_ch; 1567 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 1568 1569 first_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch1); 1570 first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1571 1572 set_thread(1); 1573 1574 ch2 = spdk_get_io_channel(bdev); 1575 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1576 1577 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 1578 ctrlr_ch2 = nbdev_ch2->ctrlr_ch; 1579 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 1580 1581 second_bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_RESET, bdev, ch2); 1582 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1583 1584 /* The first reset request is submitted on thread 1, and the second reset request 1585 * is submitted on thread 0 while processing the first request. 1586 */ 1587 bdev_nvme_submit_request(ch2, first_bdev_io); 1588 CU_ASSERT(nvme_ctrlr->resetting == true); 1589 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1590 1591 set_thread(0); 1592 1593 bdev_nvme_submit_request(ch1, second_bdev_io); 1594 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1595 1596 poll_threads(); 1597 1598 CU_ASSERT(nvme_ctrlr->resetting == false); 1599 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1600 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1601 1602 /* The first reset request is submitted on thread 1, and the second reset request 1603 * is submitted on thread 0 while processing the first request. 1604 * 1605 * The difference from the above scenario is that the controller is removed while 1606 * processing the first request. Hence both reset requests should fail. 1607 */ 1608 set_thread(1); 1609 1610 bdev_nvme_submit_request(ch2, first_bdev_io); 1611 CU_ASSERT(nvme_ctrlr->resetting == true); 1612 CU_ASSERT(TAILQ_EMPTY(&ctrlr_ch2->pending_resets)); 1613 1614 set_thread(0); 1615 1616 bdev_nvme_submit_request(ch1, second_bdev_io); 1617 CU_ASSERT(TAILQ_FIRST(&ctrlr_ch1->pending_resets) == second_bdev_io); 1618 1619 ctrlr->fail_reset = true; 1620 1621 poll_threads(); 1622 1623 CU_ASSERT(nvme_ctrlr->resetting == false); 1624 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1625 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1626 1627 spdk_put_io_channel(ch1); 1628 1629 set_thread(1); 1630 1631 spdk_put_io_channel(ch2); 1632 1633 poll_threads(); 1634 1635 set_thread(0); 1636 1637 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1638 CU_ASSERT(rc == 0); 1639 1640 poll_threads(); 1641 1642 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1643 1644 free(first_bdev_io); 1645 free(second_bdev_io); 1646 } 1647 1648 static void 1649 test_attach_ctrlr(void) 1650 { 1651 struct spdk_nvme_transport_id trid = {}; 1652 struct spdk_nvme_ctrlr *ctrlr; 1653 struct nvme_ctrlr *nvme_ctrlr; 1654 const int STRING_SIZE = 32; 1655 const char *attached_names[STRING_SIZE]; 1656 struct nvme_bdev *nbdev; 1657 int rc; 1658 1659 set_thread(0); 1660 1661 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1662 ut_init_trid(&trid); 1663 1664 /* If ctrlr fails, no nvme_ctrlr is created. Failed ctrlr is removed 1665 * by probe polling. 1666 */ 1667 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1668 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1669 1670 ctrlr->is_failed = true; 1671 g_ut_attach_ctrlr_status = -EIO; 1672 g_ut_attach_bdev_count = 0; 1673 1674 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 1675 attach_ctrlr_done, NULL, NULL, false); 1676 CU_ASSERT(rc == 0); 1677 1678 spdk_delay_us(1000); 1679 poll_threads(); 1680 1681 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1682 1683 /* If ctrlr has no namespace, one nvme_ctrlr with no namespace is created */ 1684 ctrlr = ut_attach_ctrlr(&trid, 0, false, false); 1685 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1686 1687 g_ut_attach_ctrlr_status = 0; 1688 1689 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 1690 attach_ctrlr_done, NULL, NULL, false); 1691 CU_ASSERT(rc == 0); 1692 1693 spdk_delay_us(1000); 1694 poll_threads(); 1695 1696 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1697 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1698 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1699 CU_ASSERT(nvme_ctrlr->num_ns == 0); 1700 1701 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1702 CU_ASSERT(rc == 0); 1703 1704 poll_threads(); 1705 1706 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1707 1708 /* If ctrlr has one namespace, one nvme_ctrlr with one namespace and 1709 * one nvme_bdev is created. 1710 */ 1711 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1712 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1713 1714 g_ut_attach_bdev_count = 1; 1715 1716 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 1717 attach_ctrlr_done, NULL, NULL, false); 1718 CU_ASSERT(rc == 0); 1719 1720 spdk_delay_us(1000); 1721 poll_threads(); 1722 1723 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1724 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1725 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1726 CU_ASSERT(nvme_ctrlr->num_ns == 1); 1727 1728 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 1729 attached_names[0] = NULL; 1730 1731 nbdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 1732 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 1733 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 1734 1735 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1736 CU_ASSERT(rc == 0); 1737 1738 poll_threads(); 1739 1740 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1741 1742 /* Ctrlr has one namespace but one nvme_ctrlr with no namespace is 1743 * created because creating one nvme_bdev failed. 1744 */ 1745 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1746 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1747 1748 g_ut_register_bdev_status = -EINVAL; 1749 g_ut_attach_bdev_count = 0; 1750 1751 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 1752 attach_ctrlr_done, NULL, NULL, false); 1753 CU_ASSERT(rc == 0); 1754 1755 spdk_delay_us(1000); 1756 poll_threads(); 1757 1758 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1759 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1760 CU_ASSERT(nvme_ctrlr->ctrlr == ctrlr); 1761 CU_ASSERT(nvme_ctrlr->num_ns == 1); 1762 1763 CU_ASSERT(attached_names[0] == NULL); 1764 1765 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1766 CU_ASSERT(rc == 0); 1767 1768 poll_threads(); 1769 1770 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1771 1772 g_ut_register_bdev_status = 0; 1773 } 1774 1775 static void 1776 test_aer_cb(void) 1777 { 1778 struct spdk_nvme_transport_id trid = {}; 1779 struct spdk_nvme_ctrlr *ctrlr; 1780 struct nvme_ctrlr *nvme_ctrlr; 1781 struct nvme_bdev *bdev; 1782 const int STRING_SIZE = 32; 1783 const char *attached_names[STRING_SIZE]; 1784 union spdk_nvme_async_event_completion event = {}; 1785 struct spdk_nvme_cpl cpl = {}; 1786 int rc; 1787 1788 set_thread(0); 1789 1790 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1791 ut_init_trid(&trid); 1792 1793 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 1794 * namespaces are populated. 1795 */ 1796 ctrlr = ut_attach_ctrlr(&trid, 4, true, false); 1797 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1798 1799 ctrlr->ns[0].is_active = false; 1800 1801 g_ut_attach_ctrlr_status = 0; 1802 g_ut_attach_bdev_count = 3; 1803 1804 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 1805 attach_ctrlr_done, NULL, NULL, false); 1806 CU_ASSERT(rc == 0); 1807 1808 spdk_delay_us(1000); 1809 poll_threads(); 1810 1811 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1812 poll_threads(); 1813 1814 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 1815 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 1816 1817 CU_ASSERT(nvme_ctrlr->num_ns == 4); 1818 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) == NULL); 1819 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 1820 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 1821 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 1822 1823 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev; 1824 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1825 CU_ASSERT(bdev->disk.blockcnt == 1024); 1826 1827 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 1828 * change the size of the 4th namespace. 1829 */ 1830 ctrlr->ns[0].is_active = true; 1831 ctrlr->ns[2].is_active = false; 1832 ctrlr->nsdata[3].nsze = 2048; 1833 1834 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 1835 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 1836 cpl.cdw0 = event.raw; 1837 1838 aer_cb(nvme_ctrlr, &cpl); 1839 1840 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 1841 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 1842 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) == NULL); 1843 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 1844 CU_ASSERT(bdev->disk.blockcnt == 2048); 1845 1846 /* Change ANA state of active namespaces. */ 1847 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 1848 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 1849 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 1850 1851 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 1852 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_ANA_CHANGE; 1853 cpl.cdw0 = event.raw; 1854 1855 aer_cb(nvme_ctrlr, &cpl); 1856 1857 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1858 poll_threads(); 1859 1860 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 1861 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 1862 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 1863 1864 rc = bdev_nvme_delete("nvme0", &g_any_trid); 1865 CU_ASSERT(rc == 0); 1866 1867 poll_threads(); 1868 1869 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 1870 } 1871 1872 static void 1873 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 1874 enum spdk_bdev_io_type io_type) 1875 { 1876 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 1877 struct spdk_nvme_ns *ns = NULL; 1878 struct spdk_nvme_qpair *qpair = NULL; 1879 1880 CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair)); 1881 1882 bdev_io->type = io_type; 1883 bdev_io->internal.in_submit_request = true; 1884 1885 bdev_nvme_submit_request(ch, bdev_io); 1886 1887 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1888 CU_ASSERT(qpair->num_outstanding_reqs == 1); 1889 1890 poll_threads(); 1891 1892 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1893 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1894 CU_ASSERT(qpair->num_outstanding_reqs == 0); 1895 } 1896 1897 static void 1898 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 1899 enum spdk_bdev_io_type io_type) 1900 { 1901 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 1902 struct spdk_nvme_ns *ns = NULL; 1903 struct spdk_nvme_qpair *qpair = NULL; 1904 1905 CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair)); 1906 1907 bdev_io->type = io_type; 1908 bdev_io->internal.in_submit_request = true; 1909 1910 bdev_nvme_submit_request(ch, bdev_io); 1911 1912 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1913 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1914 CU_ASSERT(qpair->num_outstanding_reqs == 0); 1915 } 1916 1917 static void 1918 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 1919 { 1920 struct nvme_bdev_channel *nbdev_ch = spdk_io_channel_get_ctx(ch); 1921 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 1922 struct ut_nvme_req *req; 1923 struct spdk_nvme_ns *ns = NULL; 1924 struct spdk_nvme_qpair *qpair = NULL; 1925 1926 CU_ASSERT(bdev_nvme_find_io_path(nbdev_ch, &ns, &qpair)); 1927 1928 /* Only compare and write now. */ 1929 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 1930 bdev_io->internal.in_submit_request = true; 1931 1932 bdev_nvme_submit_request(ch, bdev_io); 1933 1934 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1935 CU_ASSERT(qpair->num_outstanding_reqs == 2); 1936 CU_ASSERT(bio->first_fused_submitted == true); 1937 1938 /* First outstanding request is compare operation. */ 1939 req = TAILQ_FIRST(&qpair->outstanding_reqs); 1940 SPDK_CU_ASSERT_FATAL(req != NULL); 1941 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 1942 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 1943 1944 poll_threads(); 1945 1946 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1947 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1948 CU_ASSERT(qpair->num_outstanding_reqs == 0); 1949 } 1950 1951 static void 1952 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 1953 struct spdk_nvme_ctrlr *ctrlr) 1954 { 1955 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 1956 bdev_io->internal.in_submit_request = true; 1957 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1958 1959 bdev_nvme_submit_request(ch, bdev_io); 1960 1961 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1962 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 1963 1964 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 1965 poll_thread_times(1, 1); 1966 1967 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1968 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 1969 1970 poll_thread_times(0, 1); 1971 1972 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1973 } 1974 1975 static void 1976 test_submit_nvme_cmd(void) 1977 { 1978 struct spdk_nvme_transport_id trid = {}; 1979 struct spdk_nvme_ctrlr *ctrlr; 1980 struct nvme_ctrlr *nvme_ctrlr; 1981 const int STRING_SIZE = 32; 1982 const char *attached_names[STRING_SIZE]; 1983 struct nvme_bdev *bdev; 1984 struct spdk_bdev_io *bdev_io; 1985 struct spdk_io_channel *ch; 1986 struct spdk_bdev_ext_io_opts ext_io_opts = {}; 1987 int rc; 1988 1989 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1990 ut_init_trid(&trid); 1991 1992 set_thread(1); 1993 1994 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 1995 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1996 1997 g_ut_attach_ctrlr_status = 0; 1998 g_ut_attach_bdev_count = 1; 1999 2000 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 2001 attach_ctrlr_done, NULL, NULL, false); 2002 CU_ASSERT(rc == 0); 2003 2004 spdk_delay_us(1000); 2005 poll_threads(); 2006 2007 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2008 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2009 2010 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2011 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2012 2013 set_thread(0); 2014 2015 ch = spdk_get_io_channel(bdev); 2016 SPDK_CU_ASSERT_FATAL(ch != NULL); 2017 2018 bdev_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_INVALID, bdev, ch); 2019 2020 bdev_io->u.bdev.iovs = NULL; 2021 2022 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2023 2024 ut_bdev_io_set_buf(bdev_io); 2025 2026 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2027 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2028 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 2029 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 2030 2031 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 2032 2033 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 2034 2035 /* Verify that ext NVME API is called if bdev_io ext_opts is set */ 2036 bdev_io->internal.ext_opts = &ext_io_opts; 2037 g_ut_readv_ext_called = false; 2038 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 2039 CU_ASSERT(g_ut_readv_ext_called == true); 2040 g_ut_readv_ext_called = false; 2041 2042 g_ut_writev_ext_called = false; 2043 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 2044 CU_ASSERT(g_ut_writev_ext_called == true); 2045 g_ut_writev_ext_called = false; 2046 bdev_io->internal.ext_opts = NULL; 2047 2048 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 2049 2050 free(bdev_io); 2051 2052 spdk_put_io_channel(ch); 2053 2054 poll_threads(); 2055 2056 set_thread(1); 2057 2058 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2059 CU_ASSERT(rc == 0); 2060 2061 poll_threads(); 2062 2063 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2064 } 2065 2066 static void 2067 test_add_remove_trid(void) 2068 { 2069 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 2070 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2, *ctrlr3; 2071 struct nvme_ctrlr *nvme_ctrlr = NULL; 2072 const int STRING_SIZE = 32; 2073 const char *attached_names[STRING_SIZE]; 2074 struct nvme_ctrlr_trid *ctrid; 2075 int rc; 2076 2077 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2078 ut_init_trid(&trid1); 2079 ut_init_trid2(&trid2); 2080 ut_init_trid3(&trid3); 2081 2082 set_thread(0); 2083 2084 g_ut_attach_ctrlr_status = 0; 2085 g_ut_attach_bdev_count = 0; 2086 2087 ctrlr1 = ut_attach_ctrlr(&trid1, 0, false, false); 2088 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2089 2090 rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0, 2091 attach_ctrlr_done, NULL, NULL, false); 2092 CU_ASSERT(rc == 0); 2093 2094 spdk_delay_us(1000); 2095 poll_threads(); 2096 2097 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2098 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2099 2100 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->connected_trid->trid, &trid1) == 0); 2101 2102 ctrlr2 = ut_attach_ctrlr(&trid2, 0, false, false); 2103 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2104 2105 rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0, 2106 attach_ctrlr_done, NULL, NULL, false); 2107 CU_ASSERT(rc == 0); 2108 2109 spdk_delay_us(1000); 2110 poll_threads(); 2111 2112 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->connected_trid->trid, &trid1) == 0); 2113 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2114 if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) { 2115 break; 2116 } 2117 } 2118 CU_ASSERT(ctrid != NULL); 2119 2120 /* trid3 is not in the registered list. */ 2121 rc = bdev_nvme_delete("nvme0", &trid3); 2122 CU_ASSERT(rc == -ENXIO); 2123 2124 /* trid2 is not used, and simply removed. */ 2125 rc = bdev_nvme_delete("nvme0", &trid2); 2126 CU_ASSERT(rc == 0); 2127 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2128 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2129 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0); 2130 } 2131 2132 ctrlr3 = ut_attach_ctrlr(&trid3, 0, false, false); 2133 SPDK_CU_ASSERT_FATAL(ctrlr3 != NULL); 2134 2135 rc = bdev_nvme_create(&trid3, "nvme0", attached_names, STRING_SIZE, 0, 2136 attach_ctrlr_done, NULL, NULL, false); 2137 CU_ASSERT(rc == 0); 2138 2139 spdk_delay_us(1000); 2140 poll_threads(); 2141 2142 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->connected_trid->trid, &trid1) == 0); 2143 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2144 if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid3) == 0) { 2145 break; 2146 } 2147 } 2148 CU_ASSERT(ctrid != NULL); 2149 2150 /* trid1 is currently used and trid3 is an alternative path. 2151 * If we remove trid1, path is changed to trid3. 2152 */ 2153 rc = bdev_nvme_delete("nvme0", &trid1); 2154 CU_ASSERT(rc == 0); 2155 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2156 CU_ASSERT(nvme_ctrlr->resetting == true); 2157 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2158 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0); 2159 } 2160 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->connected_trid->trid, &trid3) == 0); 2161 2162 poll_threads(); 2163 2164 CU_ASSERT(nvme_ctrlr->resetting == false); 2165 2166 /* trid3 is the current and only path. If we remove trid3, the corresponding 2167 * nvme_ctrlr is removed. 2168 */ 2169 rc = bdev_nvme_delete("nvme0", &trid3); 2170 CU_ASSERT(rc == 0); 2171 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2172 2173 poll_threads(); 2174 2175 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2176 2177 ctrlr1 = ut_attach_ctrlr(&trid1, 0, false, false); 2178 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2179 2180 rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0, 2181 attach_ctrlr_done, NULL, NULL, false); 2182 CU_ASSERT(rc == 0); 2183 2184 spdk_delay_us(1000); 2185 poll_threads(); 2186 2187 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2188 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2189 2190 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->connected_trid->trid, &trid1) == 0); 2191 2192 ctrlr2 = ut_attach_ctrlr(&trid2, 0, false, false); 2193 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2194 2195 rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0, 2196 attach_ctrlr_done, NULL, NULL, false); 2197 CU_ASSERT(rc == 0); 2198 2199 spdk_delay_us(1000); 2200 poll_threads(); 2201 2202 CU_ASSERT(spdk_nvme_transport_id_compare(&nvme_ctrlr->connected_trid->trid, &trid1) == 0); 2203 TAILQ_FOREACH(ctrid, &nvme_ctrlr->trids, link) { 2204 if (spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) == 0) { 2205 break; 2206 } 2207 } 2208 CU_ASSERT(ctrid != NULL); 2209 2210 /* If trid is not specified, nvme_ctrlr itself is removed. */ 2211 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2212 CU_ASSERT(rc == 0); 2213 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == nvme_ctrlr); 2214 2215 poll_threads(); 2216 2217 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2218 } 2219 2220 static void 2221 test_abort(void) 2222 { 2223 struct spdk_nvme_transport_id trid = {}; 2224 struct spdk_nvme_ctrlr *ctrlr; 2225 struct nvme_ctrlr *nvme_ctrlr; 2226 const int STRING_SIZE = 32; 2227 const char *attached_names[STRING_SIZE]; 2228 struct nvme_bdev *bdev; 2229 struct spdk_bdev_io *write_io, *admin_io, *abort_io; 2230 struct spdk_io_channel *ch1, *ch2; 2231 struct nvme_bdev_channel *nbdev_ch1; 2232 struct nvme_ctrlr_channel *ctrlr_ch1; 2233 int rc; 2234 2235 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2236 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2237 * are submitted on thread 1. Both should succeed. 2238 */ 2239 2240 ut_init_trid(&trid); 2241 2242 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2243 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2244 2245 g_ut_attach_ctrlr_status = 0; 2246 g_ut_attach_bdev_count = 1; 2247 2248 set_thread(1); 2249 2250 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 2251 attach_ctrlr_done, NULL, NULL, false); 2252 CU_ASSERT(rc == 0); 2253 2254 spdk_delay_us(1000); 2255 poll_threads(); 2256 2257 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2258 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2259 2260 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2261 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2262 2263 write_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_WRITE, bdev, NULL); 2264 ut_bdev_io_set_buf(write_io); 2265 2266 admin_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_NVME_ADMIN, bdev, NULL); 2267 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2268 2269 abort_io = ut_alloc_bdev_io(SPDK_BDEV_IO_TYPE_ABORT, bdev, NULL); 2270 2271 set_thread(0); 2272 2273 ch1 = spdk_get_io_channel(bdev); 2274 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2275 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2276 ctrlr_ch1 = nbdev_ch1->ctrlr_ch; 2277 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 2278 2279 set_thread(1); 2280 2281 ch2 = spdk_get_io_channel(bdev); 2282 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2283 2284 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2285 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2286 2287 /* Aborting the already completed request should fail. */ 2288 write_io->internal.in_submit_request = true; 2289 bdev_nvme_submit_request(ch1, write_io); 2290 poll_threads(); 2291 2292 CU_ASSERT(write_io->internal.in_submit_request == false); 2293 2294 abort_io->u.abort.bio_to_abort = write_io; 2295 abort_io->internal.in_submit_request = true; 2296 2297 bdev_nvme_submit_request(ch1, abort_io); 2298 2299 poll_threads(); 2300 2301 CU_ASSERT(abort_io->internal.in_submit_request == false); 2302 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2303 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2304 2305 admin_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2306 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2307 2308 admin_io->internal.in_submit_request = true; 2309 bdev_nvme_submit_request(ch1, admin_io); 2310 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2311 poll_threads(); 2312 2313 CU_ASSERT(admin_io->internal.in_submit_request == false); 2314 2315 abort_io->u.abort.bio_to_abort = admin_io; 2316 abort_io->internal.in_submit_request = true; 2317 2318 bdev_nvme_submit_request(ch2, abort_io); 2319 2320 poll_threads(); 2321 2322 CU_ASSERT(abort_io->internal.in_submit_request == false); 2323 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 2324 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2325 2326 /* Aborting the write request should succeed. */ 2327 write_io->internal.in_submit_request = true; 2328 bdev_nvme_submit_request(ch1, write_io); 2329 2330 CU_ASSERT(write_io->internal.in_submit_request == true); 2331 CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 1); 2332 2333 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2334 abort_io->u.abort.bio_to_abort = write_io; 2335 abort_io->internal.in_submit_request = true; 2336 2337 bdev_nvme_submit_request(ch1, abort_io); 2338 2339 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2340 poll_threads(); 2341 2342 CU_ASSERT(abort_io->internal.in_submit_request == false); 2343 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2344 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2345 CU_ASSERT(write_io->internal.in_submit_request == false); 2346 CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2347 CU_ASSERT(ctrlr_ch1->qpair->num_outstanding_reqs == 0); 2348 2349 /* Aborting the admin request should succeed. */ 2350 admin_io->internal.in_submit_request = true; 2351 bdev_nvme_submit_request(ch1, admin_io); 2352 2353 CU_ASSERT(admin_io->internal.in_submit_request == true); 2354 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 2355 2356 abort_io->internal.ch = (struct spdk_bdev_channel *)ch2; 2357 abort_io->u.abort.bio_to_abort = admin_io; 2358 abort_io->internal.in_submit_request = true; 2359 2360 bdev_nvme_submit_request(ch2, abort_io); 2361 2362 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2363 poll_threads(); 2364 2365 CU_ASSERT(abort_io->internal.in_submit_request == false); 2366 CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 2367 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2368 CU_ASSERT(admin_io->internal.in_submit_request == false); 2369 CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED); 2370 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 2371 2372 set_thread(0); 2373 2374 spdk_put_io_channel(ch1); 2375 2376 set_thread(1); 2377 2378 spdk_put_io_channel(ch2); 2379 2380 poll_threads(); 2381 2382 free(write_io); 2383 free(admin_io); 2384 free(abort_io); 2385 2386 set_thread(1); 2387 2388 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2389 CU_ASSERT(rc == 0); 2390 2391 poll_threads(); 2392 2393 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2394 } 2395 2396 static void 2397 test_get_io_qpair(void) 2398 { 2399 struct spdk_nvme_transport_id trid = {}; 2400 struct spdk_nvme_ctrlr ctrlr = {}; 2401 struct nvme_ctrlr *nvme_ctrlr = NULL; 2402 struct spdk_io_channel *ch; 2403 struct nvme_ctrlr_channel *ctrlr_ch; 2404 struct spdk_nvme_qpair *qpair; 2405 int rc; 2406 2407 ut_init_trid(&trid); 2408 TAILQ_INIT(&ctrlr.active_io_qpairs); 2409 2410 set_thread(0); 2411 2412 rc = nvme_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 2413 CU_ASSERT(rc == 0); 2414 2415 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2416 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2417 2418 ch = spdk_get_io_channel(nvme_ctrlr); 2419 SPDK_CU_ASSERT_FATAL(ch != NULL); 2420 ctrlr_ch = spdk_io_channel_get_ctx(ch); 2421 CU_ASSERT(ctrlr_ch->qpair != NULL); 2422 2423 qpair = bdev_nvme_get_io_qpair(ch); 2424 CU_ASSERT(qpair == ctrlr_ch->qpair); 2425 2426 spdk_put_io_channel(ch); 2427 2428 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2429 CU_ASSERT(rc == 0); 2430 2431 poll_threads(); 2432 2433 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2434 } 2435 2436 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2437 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2438 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2439 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 2440 */ 2441 static void 2442 test_bdev_unregister(void) 2443 { 2444 struct spdk_nvme_transport_id trid = {}; 2445 struct spdk_nvme_ctrlr *ctrlr; 2446 struct nvme_ctrlr *nvme_ctrlr; 2447 struct nvme_ns *nvme_ns1, *nvme_ns2; 2448 const int STRING_SIZE = 32; 2449 const char *attached_names[STRING_SIZE]; 2450 struct nvme_bdev *bdev1, *bdev2; 2451 int rc; 2452 2453 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2454 ut_init_trid(&trid); 2455 2456 ctrlr = ut_attach_ctrlr(&trid, 2, false, false); 2457 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2458 2459 g_ut_attach_ctrlr_status = 0; 2460 g_ut_attach_bdev_count = 2; 2461 2462 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 2463 attach_ctrlr_done, NULL, NULL, false); 2464 CU_ASSERT(rc == 0); 2465 2466 spdk_delay_us(1000); 2467 poll_threads(); 2468 2469 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2470 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2471 2472 nvme_ns1 = nvme_ctrlr_get_ns(nvme_ctrlr, 1); 2473 SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL); 2474 2475 bdev1 = nvme_ns1->bdev; 2476 SPDK_CU_ASSERT_FATAL(bdev1 != NULL); 2477 2478 nvme_ns2 = nvme_ctrlr_get_ns(nvme_ctrlr, 2); 2479 SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL); 2480 2481 bdev2 = nvme_ns2->bdev; 2482 SPDK_CU_ASSERT_FATAL(bdev2 != NULL); 2483 2484 bdev_nvme_destruct(&bdev1->disk); 2485 bdev_nvme_destruct(&bdev2->disk); 2486 2487 poll_threads(); 2488 2489 CU_ASSERT(nvme_ns1->bdev == NULL); 2490 CU_ASSERT(nvme_ns2->bdev == NULL); 2491 2492 nvme_ctrlr->destruct = true; 2493 _nvme_ctrlr_destruct(nvme_ctrlr); 2494 2495 poll_threads(); 2496 2497 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2498 } 2499 2500 static void 2501 test_compare_ns(void) 2502 { 2503 struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {}; 2504 struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, }; 2505 struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, }; 2506 2507 /* No IDs are defined. */ 2508 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2509 2510 /* Only EUI64 are defined and not matched. */ 2511 nsdata1.eui64 = 0xABCDEF0123456789; 2512 nsdata2.eui64 = 0xBBCDEF0123456789; 2513 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2514 2515 /* Only EUI64 are defined and matched. */ 2516 nsdata2.eui64 = 0xABCDEF0123456789; 2517 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2518 2519 /* Only NGUID are defined and not matched. */ 2520 nsdata1.eui64 = 0x0; 2521 nsdata2.eui64 = 0x0; 2522 nsdata1.nguid[0] = 0x12; 2523 nsdata2.nguid[0] = 0x10; 2524 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2525 2526 /* Only NGUID are defined and matched. */ 2527 nsdata2.nguid[0] = 0x12; 2528 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2529 2530 /* Only UUID are defined and not matched. */ 2531 nsdata1.nguid[0] = 0x0; 2532 nsdata2.nguid[0] = 0x0; 2533 ns1.uuid.u.raw[0] = 0xAA; 2534 ns2.uuid.u.raw[0] = 0xAB; 2535 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false); 2536 2537 /* Only UUID are defined and matched. */ 2538 ns1.uuid.u.raw[0] = 0xAB; 2539 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2540 2541 /* All EUI64, NGUID, and UUID are defined and matched. */ 2542 nsdata1.eui64 = 0x123456789ABCDEF; 2543 nsdata2.eui64 = 0x123456789ABCDEF; 2544 nsdata1.nguid[15] = 0x34; 2545 nsdata2.nguid[15] = 0x34; 2546 CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true); 2547 } 2548 2549 static void 2550 test_init_ana_log_page(void) 2551 { 2552 struct spdk_nvme_transport_id trid = {}; 2553 struct spdk_nvme_ctrlr *ctrlr; 2554 struct nvme_ctrlr *nvme_ctrlr; 2555 const int STRING_SIZE = 32; 2556 const char *attached_names[STRING_SIZE]; 2557 int rc; 2558 2559 set_thread(0); 2560 2561 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2562 ut_init_trid(&trid); 2563 2564 ctrlr = ut_attach_ctrlr(&trid, 5, true, false); 2565 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2566 2567 ctrlr->ns[0].ana_state = SPDK_NVME_ANA_OPTIMIZED_STATE; 2568 ctrlr->ns[1].ana_state = SPDK_NVME_ANA_NON_OPTIMIZED_STATE; 2569 ctrlr->ns[2].ana_state = SPDK_NVME_ANA_INACCESSIBLE_STATE; 2570 ctrlr->ns[3].ana_state = SPDK_NVME_ANA_PERSISTENT_LOSS_STATE; 2571 ctrlr->ns[4].ana_state = SPDK_NVME_ANA_CHANGE_STATE; 2572 2573 g_ut_attach_ctrlr_status = 0; 2574 g_ut_attach_bdev_count = 5; 2575 2576 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 2577 attach_ctrlr_done, NULL, NULL, false); 2578 CU_ASSERT(rc == 0); 2579 2580 spdk_delay_us(1000); 2581 poll_threads(); 2582 2583 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2584 poll_threads(); 2585 2586 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2587 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2588 2589 CU_ASSERT(nvme_ctrlr->num_ns == 5); 2590 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1) != NULL); 2591 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2) != NULL); 2592 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3) != NULL); 2593 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4) != NULL); 2594 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5) != NULL); 2595 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->ana_state == SPDK_NVME_ANA_OPTIMIZED_STATE); 2596 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->ana_state == SPDK_NVME_ANA_NON_OPTIMIZED_STATE); 2597 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->ana_state == SPDK_NVME_ANA_INACCESSIBLE_STATE); 2598 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->ana_state == SPDK_NVME_ANA_PERSISTENT_LOSS_STATE); 2599 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->ana_state == SPDK_NVME_ANA_CHANGE_STATE); 2600 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev != NULL); 2601 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 2)->bdev != NULL); 2602 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 3)->bdev != NULL); 2603 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 4)->bdev != NULL); 2604 CU_ASSERT(nvme_ctrlr_get_ns(nvme_ctrlr, 5)->bdev != NULL); 2605 2606 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2607 CU_ASSERT(rc == 0); 2608 2609 poll_threads(); 2610 2611 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2612 } 2613 2614 static void 2615 init_accel(void) 2616 { 2617 spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb, 2618 sizeof(int), "accel_p"); 2619 } 2620 2621 static void 2622 fini_accel(void) 2623 { 2624 spdk_io_device_unregister(g_accel_p, NULL); 2625 } 2626 2627 static void 2628 test_get_memory_domains(void) 2629 { 2630 struct nvme_ctrlr ctrlr = { .ctrlr = (struct spdk_nvme_ctrlr *) 0xbaadbeef }; 2631 struct nvme_ns ns = { .ctrlr = &ctrlr }; 2632 struct nvme_bdev nbdev = { .nvme_ns = &ns }; 2633 struct spdk_memory_domain *domains[2] = {}; 2634 int rc = 0; 2635 2636 /* nvme controller doesn't have memory domainы */ 2637 MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 0); 2638 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 2639 CU_ASSERT(rc == 0) 2640 2641 /* nvme controller has a memory domain */ 2642 MOCK_SET(spdk_nvme_ctrlr_get_memory_domain, 1); 2643 rc = bdev_nvme_get_memory_domains(&nbdev, domains, 2); 2644 CU_ASSERT(rc == 1); 2645 MOCK_CLEAR(spdk_nvme_ctrlr_get_memory_domain); 2646 } 2647 2648 static void 2649 test_reconnect_qpair(void) 2650 { 2651 struct spdk_nvme_transport_id trid = {}; 2652 struct spdk_nvme_ctrlr *ctrlr; 2653 struct nvme_ctrlr *nvme_ctrlr; 2654 const int STRING_SIZE = 32; 2655 const char *attached_names[STRING_SIZE]; 2656 struct nvme_bdev *bdev; 2657 struct spdk_io_channel *ch1, *ch2; 2658 struct nvme_bdev_channel *nbdev_ch1, *nbdev_ch2; 2659 struct nvme_ctrlr_channel *ctrlr_ch1, *ctrlr_ch2; 2660 int rc; 2661 2662 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2663 ut_init_trid(&trid); 2664 2665 set_thread(0); 2666 2667 ctrlr = ut_attach_ctrlr(&trid, 1, false, false); 2668 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2669 2670 g_ut_attach_ctrlr_status = 0; 2671 g_ut_attach_bdev_count = 1; 2672 2673 rc = bdev_nvme_create(&trid, "nvme0", attached_names, STRING_SIZE, 0, 2674 attach_ctrlr_done, NULL, NULL, false); 2675 CU_ASSERT(rc == 0); 2676 2677 spdk_delay_us(1000); 2678 poll_threads(); 2679 2680 nvme_ctrlr = nvme_ctrlr_get_by_name("nvme0"); 2681 SPDK_CU_ASSERT_FATAL(nvme_ctrlr != NULL); 2682 2683 bdev = nvme_ctrlr_get_ns(nvme_ctrlr, 1)->bdev; 2684 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2685 2686 ch1 = spdk_get_io_channel(bdev); 2687 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2688 2689 nbdev_ch1 = spdk_io_channel_get_ctx(ch1); 2690 ctrlr_ch1 = nbdev_ch1->ctrlr_ch; 2691 SPDK_CU_ASSERT_FATAL(ctrlr_ch1 != NULL); 2692 2693 set_thread(1); 2694 2695 ch2 = spdk_get_io_channel(bdev); 2696 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2697 2698 nbdev_ch2 = spdk_io_channel_get_ctx(ch2); 2699 ctrlr_ch2 = nbdev_ch2->ctrlr_ch; 2700 SPDK_CU_ASSERT_FATAL(ctrlr_ch2 != NULL); 2701 2702 /* If a qpair is disconnected, it is freed and then reconnected via 2703 * resetting the corresponding nvme_ctrlr. 2704 */ 2705 ctrlr_ch2->qpair->is_connected = false; 2706 ctrlr->is_failed = true; 2707 2708 poll_thread_times(1, 1); 2709 CU_ASSERT(ctrlr_ch1->qpair != NULL); 2710 CU_ASSERT(ctrlr_ch2->qpair == NULL); 2711 CU_ASSERT(nvme_ctrlr->resetting == true); 2712 2713 poll_thread_times(0, 1); 2714 poll_thread_times(1, 1); 2715 CU_ASSERT(ctrlr_ch1->qpair == NULL); 2716 CU_ASSERT(ctrlr_ch2->qpair == NULL); 2717 CU_ASSERT(ctrlr->is_failed == true); 2718 2719 poll_thread_times(1, 1); 2720 CU_ASSERT(ctrlr->is_failed == false); 2721 2722 poll_thread_times(0, 1); 2723 poll_thread_times(1, 1); 2724 CU_ASSERT(ctrlr_ch1->qpair != NULL); 2725 CU_ASSERT(ctrlr_ch2->qpair != NULL); 2726 CU_ASSERT(nvme_ctrlr->resetting == true); 2727 2728 poll_thread_times(1, 1); 2729 CU_ASSERT(nvme_ctrlr->resetting == false); 2730 2731 poll_threads(); 2732 2733 /* If a qpair is disconnected and resetting the corresponding nvme_ctrlr 2734 * fails, the qpair is just freed. 2735 */ 2736 ctrlr_ch2->qpair->is_connected = false; 2737 ctrlr->is_failed = true; 2738 ctrlr->fail_reset = true; 2739 2740 poll_thread_times(1, 1); 2741 CU_ASSERT(ctrlr_ch1->qpair != NULL); 2742 CU_ASSERT(ctrlr_ch2->qpair == NULL); 2743 CU_ASSERT(nvme_ctrlr->resetting == true); 2744 2745 poll_thread_times(0, 1); 2746 poll_thread_times(1, 1); 2747 CU_ASSERT(ctrlr_ch1->qpair == NULL); 2748 CU_ASSERT(ctrlr_ch2->qpair == NULL); 2749 CU_ASSERT(ctrlr->is_failed == true); 2750 2751 poll_thread_times(1, 1); 2752 CU_ASSERT(ctrlr->is_failed == true); 2753 CU_ASSERT(nvme_ctrlr->resetting == false); 2754 CU_ASSERT(ctrlr_ch1->qpair == NULL); 2755 CU_ASSERT(ctrlr_ch2->qpair == NULL); 2756 2757 poll_threads(); 2758 2759 spdk_put_io_channel(ch2); 2760 2761 set_thread(0); 2762 2763 spdk_put_io_channel(ch1); 2764 2765 poll_threads(); 2766 2767 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2768 CU_ASSERT(rc == 0); 2769 2770 poll_threads(); 2771 2772 CU_ASSERT(nvme_ctrlr_get_by_name("nvme0") == NULL); 2773 } 2774 2775 static void 2776 test_create_bdev_ctrlr(void) 2777 { 2778 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 2779 struct spdk_nvme_ctrlr *ctrlr1, *ctrlr2; 2780 struct nvme_bdev_ctrlr *nbdev_ctrlr; 2781 const int STRING_SIZE = 32; 2782 const char *attached_names[STRING_SIZE]; 2783 int rc; 2784 2785 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 2786 ut_init_trid(&trid1); 2787 ut_init_trid2(&trid2); 2788 2789 ctrlr1 = ut_attach_ctrlr(&trid1, 0, true, true); 2790 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2791 2792 g_ut_attach_ctrlr_status = 0; 2793 g_ut_attach_bdev_count = 0; 2794 2795 rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0, 2796 attach_ctrlr_done, NULL, NULL, true); 2797 2798 spdk_delay_us(1000); 2799 poll_threads(); 2800 2801 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2802 poll_threads(); 2803 2804 nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0"); 2805 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 2806 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) != NULL); 2807 2808 /* cntlid is duplicated, and adding the second ctrlr should fail. */ 2809 g_ut_attach_ctrlr_status = -EINVAL; 2810 2811 ctrlr2 = ut_attach_ctrlr(&trid2, 0, true, true); 2812 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2813 2814 ctrlr2->cdata.cntlid = ctrlr1->cdata.cntlid; 2815 2816 rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0, 2817 attach_ctrlr_done, NULL, NULL, true); 2818 CU_ASSERT(rc == 0); 2819 2820 spdk_delay_us(1000); 2821 poll_threads(); 2822 2823 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2824 poll_threads(); 2825 2826 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) == NULL); 2827 2828 /* cntlid is not duplicated, and adding the third ctrlr should succeed. */ 2829 g_ut_attach_ctrlr_status = 0; 2830 2831 ctrlr2 = ut_attach_ctrlr(&trid2, 0, true, true); 2832 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2833 2834 rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0, 2835 attach_ctrlr_done, NULL, NULL, true); 2836 CU_ASSERT(rc == 0); 2837 2838 spdk_delay_us(1000); 2839 poll_threads(); 2840 2841 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2842 poll_threads(); 2843 2844 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL); 2845 2846 /* Delete two ctrlrs at once. */ 2847 rc = bdev_nvme_delete("nvme0", &g_any_trid); 2848 CU_ASSERT(rc == 0); 2849 2850 CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr); 2851 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) != NULL); 2852 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL); 2853 2854 poll_threads(); 2855 2856 CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL); 2857 2858 /* Add two ctrlrs and delete one by one. */ 2859 ctrlr1 = ut_attach_ctrlr(&trid1, 0, true, true); 2860 SPDK_CU_ASSERT_FATAL(ctrlr1 != NULL); 2861 2862 ctrlr2 = ut_attach_ctrlr(&trid2, 0, true, true); 2863 SPDK_CU_ASSERT_FATAL(ctrlr2 != NULL); 2864 2865 rc = bdev_nvme_create(&trid1, "nvme0", attached_names, STRING_SIZE, 0, 2866 attach_ctrlr_done, NULL, NULL, true); 2867 CU_ASSERT(rc == 0); 2868 2869 spdk_delay_us(1000); 2870 poll_threads(); 2871 2872 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2873 poll_threads(); 2874 2875 rc = bdev_nvme_create(&trid2, "nvme0", attached_names, STRING_SIZE, 0, 2876 attach_ctrlr_done, NULL, NULL, true); 2877 CU_ASSERT(rc == 0); 2878 2879 spdk_delay_us(1000); 2880 poll_threads(); 2881 2882 spdk_delay_us(g_opts.nvme_adminq_poll_period_us); 2883 poll_threads(); 2884 2885 nbdev_ctrlr = nvme_bdev_ctrlr_get("nvme0"); 2886 SPDK_CU_ASSERT_FATAL(nbdev_ctrlr != NULL); 2887 2888 rc = bdev_nvme_delete("nvme0", &trid1); 2889 CU_ASSERT(rc == 0); 2890 2891 CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr); 2892 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) != NULL); 2893 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL); 2894 2895 poll_threads(); 2896 2897 CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr); 2898 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) == NULL); 2899 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL); 2900 2901 rc = bdev_nvme_delete("nvme0", &trid2); 2902 CU_ASSERT(rc == 0); 2903 2904 CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == nbdev_ctrlr); 2905 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid1) == NULL); 2906 CU_ASSERT(nvme_bdev_ctrlr_get_ctrlr(nbdev_ctrlr, &trid2) != NULL); 2907 2908 poll_threads(); 2909 2910 CU_ASSERT(nvme_bdev_ctrlr_get("nvme0") == NULL); 2911 } 2912 2913 int 2914 main(int argc, const char **argv) 2915 { 2916 CU_pSuite suite = NULL; 2917 unsigned int num_failures; 2918 2919 CU_set_error_action(CUEA_ABORT); 2920 CU_initialize_registry(); 2921 2922 suite = CU_add_suite("nvme", NULL, NULL); 2923 2924 CU_ADD_TEST(suite, test_create_ctrlr); 2925 CU_ADD_TEST(suite, test_reset_ctrlr); 2926 CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr); 2927 CU_ADD_TEST(suite, test_failover_ctrlr); 2928 CU_ADD_TEST(suite, test_pending_reset); 2929 CU_ADD_TEST(suite, test_attach_ctrlr); 2930 CU_ADD_TEST(suite, test_aer_cb); 2931 CU_ADD_TEST(suite, test_submit_nvme_cmd); 2932 CU_ADD_TEST(suite, test_add_remove_trid); 2933 CU_ADD_TEST(suite, test_abort); 2934 CU_ADD_TEST(suite, test_get_io_qpair); 2935 CU_ADD_TEST(suite, test_bdev_unregister); 2936 CU_ADD_TEST(suite, test_compare_ns); 2937 CU_ADD_TEST(suite, test_init_ana_log_page); 2938 CU_ADD_TEST(suite, test_get_memory_domains); 2939 CU_ADD_TEST(suite, test_reconnect_qpair); 2940 CU_ADD_TEST(suite, test_create_bdev_ctrlr); 2941 2942 CU_basic_set_mode(CU_BRM_VERBOSE); 2943 2944 allocate_threads(3); 2945 set_thread(0); 2946 bdev_nvme_library_init(); 2947 init_accel(); 2948 2949 CU_basic_run_tests(); 2950 2951 set_thread(0); 2952 bdev_nvme_library_fini(); 2953 fini_accel(); 2954 free_threads(); 2955 2956 num_failures = CU_get_number_of_failures(); 2957 CU_cleanup_registry(); 2958 2959 return num_failures; 2960 } 2961