1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "spdk/stdinc.h" 35 #include "spdk_cunit.h" 36 #include "spdk/thread.h" 37 #include "spdk/bdev_module.h" 38 #include "spdk/bdev_module.h" 39 40 #include "common/lib/ut_multithread.c" 41 42 #include "bdev/nvme/bdev_nvme.c" 43 #include "bdev/nvme/common.c" 44 45 #include "unit/lib/json_mock.c" 46 47 static void *g_accel_p = (void *)0xdeadbeaf; 48 49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 50 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 51 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 52 spdk_nvme_remove_cb remove_cb), NULL); 53 54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 55 enum spdk_nvme_transport_type trtype)); 56 57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 58 NULL); 59 60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 61 62 DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts, 63 size_t opts_size)); 64 65 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 66 struct spdk_nvme_transport_id *trid), 0); 67 68 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 69 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 70 71 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 72 73 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0); 74 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf)); 75 76 struct spdk_io_channel * 77 spdk_accel_engine_get_io_channel(void) 78 { 79 return spdk_get_io_channel(g_accel_p); 80 } 81 82 void 83 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 84 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 85 { 86 /* Avoid warning that opts is used uninitialised */ 87 memset(opts, 0, opts_size); 88 } 89 90 
/* Stubbed NVMe controller attribute getters (not exercised by these tests). */
DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Stubbed namespace attribute getters; namespace geometry used by the tests
 * comes from the mock structs defined further below instead.
 */
DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_ana_state, enum spdk_nvme_ana_state,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

/* Stubbed Zoned Namespace (ZNS) command set entry points. */
DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Stubbed bdev-module registration hooks and Opal/OCSSD helpers referenced by
 * the code under test (bdev_nvme.c) but not exercised here.
 */
DEFINE_STUB_V(spdk_bdev_module_finish_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
		struct nvme_bdev_ns *nvme_ns, struct nvme_async_probe_ctx *ctx));

DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_bdev_ns *nvme_ns));

DEFINE_STUB_V(bdev_ocssd_namespace_config_json, (struct spdk_json_write_ctx *w,
		struct nvme_bdev_ns *nvme_ns));

DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_io_path *ioch), 0);

DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_io_path *ioch));

DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr), 0);

DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr));

DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr));
DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);


/* A request queued on a mock qpair.  It is completed (cb_fn invoked with cpl)
 * by the spdk_nvme_qpair_process_completions() mock further below.
 */
struct ut_nvme_req {
	uint16_t opc;
	spdk_nvme_cmd_cb cb_fn;
	void *cb_arg;
	struct spdk_nvme_cpl cpl;
	TAILQ_ENTRY(ut_nvme_req) tailq;
};

/* Minimal mock of the (normally opaque) NVMe namespace object. */
struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t id;
	bool is_active;
	struct spdk_uuid uuid;
};

/* Minimal mock of the (normally opaque) NVMe queue pair object. */
struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr *ctrlr;
	bool is_connected;
	TAILQ_HEAD(, ut_nvme_req) outstanding_reqs;
	uint32_t num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair) poll_group_tailq;
	struct spdk_nvme_poll_group *poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair) tailq;
};

/* Minimal mock of the (normally opaque) NVMe controller object.
 * ns and nsdata are parallel arrays of num_ns elements, indexed by nsid - 1.
 */
struct spdk_nvme_ctrlr {
	uint32_t num_ns;
	struct spdk_nvme_ns *ns;
	struct spdk_nvme_ns_data *nsdata;
	struct spdk_nvme_qpair adminq;
	struct spdk_nvme_ctrlr_data cdata;
	bool attached;
	bool is_failed;
	bool fail_reset;	/* makes spdk_nvme_ctrlr_reset() return -EIO */
	struct spdk_nvme_transport_id trid;
	TAILQ_HEAD(, spdk_nvme_qpair) active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr) tailq;
	struct spdk_nvme_ctrlr_opts opts;
};

struct spdk_nvme_poll_group {
	void *ctx;
	struct spdk_nvme_accel_fn_table accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair) qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id trid;
	void *cb_ctx;
	spdk_nvme_attach_cb attach_cb;
	struct spdk_nvme_ctrlr *init_ctrlr;
};

/* Controllers created by ut_attach_ctrlr() but not yet picked up by probing. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Controllers moved here by spdk_nvme_probe_poll_async() once "attached". */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
	g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status;

/* Fill trid with a fixed TCP transport ID (traddr 192.168.100.8). */
static void
ut_init_trid(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

/* Same subsystem as ut_init_trid(), different address (192.168.100.9). */
static void
ut_init_trid2(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

/* Same subsystem as ut_init_trid(), different address (192.168.100.10). */
static void
ut_init_trid3(struct spdk_nvme_transport_id *trid)
{
	trid->trtype = SPDK_NVME_TRANSPORT_TCP;
	snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
	snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10");
	snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
}

/* Three-way comparison of two ints: <0, 0, or >0. */
static int
cmp_int(int a, int b)
{
	return a - b;
}

/* Mock of the transport ID comparator: compares field by field in the same
 * order as the real implementation (trtype, traddr, adrfam, trsvcid, subnqn).
 */
int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	int cmp;

	/* We assume trtype is TCP for now. */
	CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP);

	cmp = cmp_int(trid1->trtype, trid2->trtype);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->traddr, trid2->traddr);
	if (cmp) {
		return cmp;
	}

	cmp = cmp_int(trid1->adrfam, trid2->adrfam);
	if (cmp) {
		return cmp;
	}

	cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid);
	if (cmp) {
		return cmp;
	}

	cmp = strcmp(trid1->subnqn, trid2->subnqn);
	if (cmp) {
		return cmp;
	}

	return 0;
}

/* Create a mock controller with num_ns active namespaces (each 1024 blocks)
 * and queue it on g_ut_init_ctrlrs for a later probe to pick up.
 * Returns NULL on allocation failure or if a controller with the same trid
 * is already queued.
 */
static struct spdk_nvme_ctrlr *
ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns)
{
	struct spdk_nvme_ctrlr *ctrlr;
	uint32_t i;

	TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) {
		if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) {
			/* There is a ctrlr whose trid matches. */
			return NULL;
		}
	}

	ctrlr = calloc(1, sizeof(*ctrlr));
	if (ctrlr == NULL) {
		return NULL;
	}

	ctrlr->attached = true;
	ctrlr->adminq.ctrlr = ctrlr;
	TAILQ_INIT(&ctrlr->adminq.outstanding_reqs);

	if (num_ns != 0) {
		ctrlr->num_ns = num_ns;
		ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns));
		if (ctrlr->ns == NULL) {
			free(ctrlr);
			return NULL;
		}

		ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data));
		if (ctrlr->nsdata == NULL) {
			free(ctrlr->ns);
			free(ctrlr);
			return NULL;
		}

		for (i = 0; i < num_ns; i++) {
			ctrlr->ns[i].id = i + 1;	/* nsid is 1-based */
			ctrlr->ns[i].ctrlr = ctrlr;
			ctrlr->ns[i].is_active = true;
			ctrlr->nsdata[i].nsze = 1024;
		}
	}

	ctrlr->trid = *trid;
	TAILQ_INIT(&ctrlr->active_io_qpairs);

	TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq);

	return ctrlr;
}

/* Free a mock controller created by ut_attach_ctrlr(); it must be on the
 * attached list and must have no remaining active io qpairs.
 */
static void
ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr)
{
	CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs));

	TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr,
tailq); 411 free(ctrlr->nsdata); 412 free(ctrlr->ns); 413 free(ctrlr); 414 } 415 416 static int 417 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 418 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 419 { 420 struct ut_nvme_req *req; 421 422 req = calloc(1, sizeof(*req)); 423 if (req == NULL) { 424 return -ENOMEM; 425 } 426 427 req->opc = opc; 428 req->cb_fn = cb_fn; 429 req->cb_arg = cb_arg; 430 431 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 432 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 433 434 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 435 qpair->num_outstanding_reqs++; 436 437 return 0; 438 } 439 440 static void 441 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 442 { 443 bdev_io->u.bdev.iovs = &bdev_io->iov; 444 bdev_io->u.bdev.iovcnt = 1; 445 446 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 447 bdev_io->iov.iov_len = 4096; 448 } 449 450 static void 451 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 452 { 453 if (ctrlr->is_failed) { 454 free(ctrlr); 455 return; 456 } 457 458 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 459 460 if (probe_ctx->attach_cb) { 461 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 462 } 463 } 464 465 int 466 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 467 { 468 struct spdk_nvme_ctrlr *ctrlr, *tmp; 469 470 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 471 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 472 continue; 473 } 474 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 475 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 476 } 477 478 free(probe_ctx); 479 480 return 0; 481 } 482 483 struct spdk_nvme_probe_ctx * 484 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 485 const struct spdk_nvme_ctrlr_opts *opts, 486 spdk_nvme_attach_cb attach_cb) 487 { 488 struct spdk_nvme_probe_ctx *probe_ctx; 489 490 if (trid == NULL) { 491 
return NULL; 492 } 493 494 probe_ctx = calloc(1, sizeof(*probe_ctx)); 495 if (probe_ctx == NULL) { 496 return NULL; 497 } 498 499 probe_ctx->trid = *trid; 500 probe_ctx->cb_ctx = (void *)opts; 501 probe_ctx->attach_cb = attach_cb; 502 503 return probe_ctx; 504 } 505 506 int 507 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 508 { 509 if (ctrlr->attached) { 510 ut_detach_ctrlr(ctrlr); 511 } 512 513 return 0; 514 } 515 516 const struct spdk_nvme_ctrlr_data * 517 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 518 { 519 return &ctrlr->cdata; 520 } 521 522 uint32_t 523 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 524 { 525 return ctrlr->num_ns; 526 } 527 528 struct spdk_nvme_ns * 529 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 530 { 531 if (nsid < 1 || nsid > ctrlr->num_ns) { 532 return NULL; 533 } 534 535 return &ctrlr->ns[nsid - 1]; 536 } 537 538 bool 539 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 540 { 541 if (nsid < 1 || nsid > ctrlr->num_ns) { 542 return false; 543 } 544 545 return ctrlr->ns[nsid - 1].is_active; 546 } 547 548 union spdk_nvme_csts_register 549 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 550 { 551 union spdk_nvme_csts_register csts; 552 553 csts.raw = 0; 554 555 return csts; 556 } 557 558 union spdk_nvme_vs_register 559 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 560 { 561 union spdk_nvme_vs_register vs; 562 563 vs.raw = 0; 564 565 return vs; 566 } 567 568 struct spdk_nvme_qpair * 569 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 570 const struct spdk_nvme_io_qpair_opts *user_opts, 571 size_t opts_size) 572 { 573 struct spdk_nvme_qpair *qpair; 574 575 qpair = calloc(1, sizeof(*qpair)); 576 if (qpair == NULL) { 577 return NULL; 578 } 579 580 qpair->ctrlr = ctrlr; 581 TAILQ_INIT(&qpair->outstanding_reqs); 582 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 583 584 return qpair; 585 } 586 587 int 588 
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr,
				 struct spdk_nvme_qpair *qpair)
{
	/* Connecting an already-connected qpair is an error. */
	if (qpair->is_connected) {
		return -EISCONN;
	}

	qpair->is_connected = true;

	return 0;
}

/* Mock reconnect: succeeds unless the owning controller is failed. */
int
spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	struct spdk_nvme_ctrlr *ctrlr;

	ctrlr = qpair->ctrlr;

	if (ctrlr->is_failed) {
		return -ENXIO;
	}
	qpair->is_connected = true;

	return 0;
}

void
spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair)
{
	qpair->is_connected = false;
}

/* Free a qpair allocated by spdk_nvme_ctrlr_alloc_io_qpair(), detaching it
 * from its poll group (if any) and from the controller's active list.
 * All outstanding requests must have completed.
 */
int
spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
{
	SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL);

	qpair->is_connected = false;

	if (qpair->poll_group != NULL) {
		spdk_nvme_poll_group_remove(qpair->poll_group, qpair);
	}

	TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq);

	CU_ASSERT(qpair->num_outstanding_reqs == 0);

	free(qpair);

	return 0;
}

/* Mock reset: clears is_failed, or returns -EIO if the test set fail_reset. */
int
spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr)
{
	if (ctrlr->fail_reset) {
		return -EIO;
	}

	ctrlr->is_failed = false;

	return 0;
}

void
spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr)
{
	ctrlr->is_failed = true;
}

/* Admin commands are queued on the controller's adminq and completed by
 * spdk_nvme_ctrlr_process_admin_completions().
 */
int
spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
			      struct spdk_nvme_cmd *cmd, void *buf, uint32_t len,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg);
}

/* Mock abort: find the outstanding request whose cb_arg matches cmd_cb_arg,
 * mark it aborted-by-request, and queue a successful ABORT command on the
 * adminq.  Returns -ENOENT if no matching request is outstanding.
 */
int
spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair,
			      void *cmd_cb_arg,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct ut_nvme_req *req = NULL, *abort_req;

	/* A NULL qpair means the target command is on the admin queue. */
	if (qpair == NULL) {
		qpair = &ctrlr->adminq;
	}

	abort_req = calloc(1, sizeof(*abort_req));
	if (abort_req == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) {
		if (req->cb_arg == cmd_cb_arg) {
			break;
		}
	}

	if (req == NULL) {
		free(abort_req);
		return -ENOENT;
	}

	/* The aborted command will complete with this status. */
	req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
	req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;

	abort_req->opc = SPDK_NVME_OPC_ABORT;
	abort_req->cb_fn = cb_fn;
	abort_req->cb_arg = cb_arg;

	abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS;
	abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	abort_req->cpl.cdw0 = 0;

	TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq);
	ctrlr->adminq.num_outstanding_reqs++;

	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0);
}

uint32_t
spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns)
{
	return ns->id;
}

struct spdk_nvme_ctrlr *
spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr;
}

/* Look up the namespace data in the controller's parallel nsdata array. */
static inline struct spdk_nvme_ns_data *
_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return &ns->ctrlr->nsdata[ns->id - 1];
}

const struct spdk_nvme_ns_data *
spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns);
}

uint64_t
spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns)
{
	return _nvme_ns_get_data(ns)->nsze;
}

const struct spdk_uuid *
spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns)
{
	return &ns->uuid;
}

/* The I/O command mocks below ignore buffers and LBAs; each just queues a
 * ut_nvme_req with the matching opcode on the given qpair.
 */
int
spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer,
			      void *metadata, uint64_t lba, uint32_t lba_count,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       void *buffer, void *metadata, uint64_t lba,
			       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			       uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
			       uint64_t lba, uint32_t lba_count,
			       spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			       spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			       spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
			       uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				uint64_t lba, uint32_t lba_count,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata,
				uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				  uint64_t lba, uint32_t lba_count,
				  spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
				  spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
				  spdk_nvme_req_next_sge_cb next_sge_fn,
				  void *metadata, uint16_t apptag_mask, uint16_t apptag)
{
	return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg);
}

int
spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
				    uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges,
				    spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return
ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg);
}

/* Mock poll group: just records ctx and the accel function table. */
struct spdk_nvme_poll_group *
spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table)
{
	struct spdk_nvme_poll_group *group;

	group = calloc(1, sizeof(*group));
	if (group == NULL) {
		return NULL;
	}

	group->ctx = ctx;
	if (table != NULL) {
		group->accel_fn_table = *table;
	}
	TAILQ_INIT(&group->qpairs);

	return group;
}

/* A poll group may only be destroyed once all qpairs have been removed. */
int
spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group)
{
	if (!TAILQ_EMPTY(&group->qpairs)) {
		return -EBUSY;
	}

	free(group);

	return 0;
}

/* Complete every outstanding mock request on the qpair (max_completions is
 * ignored) and return the number completed.
 */
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair,
				    uint32_t max_completions)
{
	struct ut_nvme_req *req, *tmp;
	uint32_t num_completions = 0;

	TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) {
		TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq);
		qpair->num_outstanding_reqs--;

		req->cb_fn(req->cb_arg, &req->cpl);

		free(req);
		num_completions++;
	}

	return num_completions;
}

/* Process completions on every connected qpair in the group, then invoke
 * disconnected_qpair_cb for each disconnected qpair.  Returns the first
 * error seen, otherwise the total completion count.
 */
int64_t
spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair,
		spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
{
	struct spdk_nvme_qpair *qpair, *tmp_qpair;
	int64_t local_completions = 0, error_reason = 0, num_completions = 0;

	/* The tests only ever poll with "no limit" (0). */
	SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0);

	if (disconnected_qpair_cb == NULL) {
		return -EINVAL;
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (qpair->is_connected) {
			local_completions = spdk_nvme_qpair_process_completions(qpair,
					    completions_per_qpair);
			if (local_completions < 0 && error_reason == 0) {
				error_reason = local_completions;
			} else {
				num_completions += local_completions;
				assert(num_completions >= 0);
			}
		}
	}

	TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) {
		if (!qpair->is_connected) {
			disconnected_qpair_cb(qpair, group->ctx);
		}
	}

	return error_reason ? error_reason : num_completions;
}

/* Qpairs must be disconnected while being added to or removed from a group. */
int
spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group,
			 struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	qpair->poll_group = group;
	TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

int
spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group,
			    struct spdk_nvme_qpair *qpair)
{
	CU_ASSERT(!qpair->is_connected);

	TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq);

	return 0;
}

/* Mock bdev registration: result is controlled by g_ut_register_bdev_status. */
int
spdk_bdev_register(struct spdk_bdev *bdev)
{
	return g_ut_register_bdev_status;
}

/* Mock unregister: call destruct directly and, if it completed synchronously
 * (rc <= 0), invoke the caller's completion callback.
 */
void
spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	int rc;

	rc = bdev->fn_table->destruct(bdev->ctxt);
	if (rc <= 0 && cb_fn != NULL) {
		cb_fn(cb_arg, rc);
	}
}

int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
	bdev->blockcnt = size;

	return 0;
}

struct spdk_io_channel *
spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io)
{
	return (struct spdk_io_channel *)bdev_io->internal.ch;
}

/* Record completion status directly on the bdev_io instead of going through
 * the real bdev completion path; tests inspect internal.status afterwards.
 */
void
spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
{
	bdev_io->internal.status = status;
	bdev_io->internal.in_submit_request = false;
}

/* Translate an NVMe status code pair into a bdev io status and complete. */
void
spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc)
{
	if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
	} else if (sct == SPDK_NVME_SCT_GENERIC && sc ==
SPDK_NVME_SC_ABORTED_BY_REQUEST) {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED;
	} else {
		bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR;
	}

	bdev_io->internal.error.nvme.cdw0 = cdw0;
	bdev_io->internal.error.nvme.sct = sct;
	bdev_io->internal.error.nvme.sc = sc;

	spdk_bdev_io_complete(bdev_io, bdev_io->internal.status);
}

/* Mock get_buf: assign the dummy buffer and invoke the callback immediately. */
void
spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
{
	struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io);

	ut_bdev_io_set_buf(bdev_io);

	cb(ch, bdev_io, true);
}

/* Create a named nvme_bdev_ctrlr, then delete it and verify that the delete
 * only takes effect after the threads are polled.
 */
static void
test_create_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	int rc;

	ut_init_trid(&trid);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	/* Deletion is deferred; the ctrlr is still registered until polled. */
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Drive _bdev_nvme_reset() step by step across two threads, checking the
 * error paths (destructing, already resetting) and then the qpair
 * disconnect/reconnect ordering of a successful reset.
 */
static void
test_reset_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct nvme_bdev_ctrlr_trid *curr_trid;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_io_path *io_path1, *io_path2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* One io channel (and hence one io_path/qpair) per thread. */
	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	io_path1 = spdk_io_channel_get_ctx(ch1);
	CU_ASSERT(io_path1->qpair != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	io_path2 = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(io_path2->qpair != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_bdev_ctrlr->destruct = true;

	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Case 2: reset is in progress. */
	nvme_bdev_ctrlr->destruct = false;
	nvme_bdev_ctrlr->resetting = true;

	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 3: reset completes successfully. */
	nvme_bdev_ctrlr->resetting = false;
	curr_trid->is_failed = true;
	ctrlr.is_failed = true;

	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(io_path1->qpair != NULL);
	CU_ASSERT(io_path2->qpair != NULL);

	/* Qpairs are torn down one thread at a time: thread 0 first... */
	poll_thread_times(0, 1);
	CU_ASSERT(io_path1->qpair == NULL);
	CU_ASSERT(io_path2->qpair != NULL);

	/* ...then thread 1; the ctrlr itself has not been reset yet. */
	poll_thread_times(1, 1);
	CU_ASSERT(io_path1->qpair == NULL);
	CU_ASSERT(io_path2->qpair == NULL);
	CU_ASSERT(ctrlr.is_failed == true);

	/* Next poll performs the controller reset. */
	poll_thread_times(1, 1);
	CU_ASSERT(ctrlr.is_failed == false);

	/* Qpairs are then recreated thread by thread. */
	poll_thread_times(0, 1);
	CU_ASSERT(io_path1->qpair != NULL);
	CU_ASSERT(io_path2->qpair == NULL);

	poll_thread_times(1, 1);
	CU_ASSERT(io_path1->qpair != NULL);
	CU_ASSERT(io_path2->qpair != NULL);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	/* Final poll completes the reset and clears the failure flags. */
	poll_thread_times(1, 1);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Start a reset and then request destruction while it is in flight; the
 * destruct must be deferred until the reset completes and all io channels
 * are released.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->destruct == true);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed but ctrlr is not still destructed yet. */
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->destruct == true);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);

	/* New reset request is rejected.
*/ 1169 rc = _bdev_nvme_reset(nvme_bdev_ctrlr); 1170 CU_ASSERT(rc == -EBUSY); 1171 1172 /* Additional polling called spdk_io_device_unregister() to ctrlr, 1173 * However there are two channels and destruct is not completed yet. 1174 */ 1175 poll_threads(); 1176 1177 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); 1178 1179 set_thread(0); 1180 1181 spdk_put_io_channel(ch1); 1182 1183 set_thread(1); 1184 1185 spdk_put_io_channel(ch2); 1186 1187 poll_threads(); 1188 1189 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1190 } 1191 1192 static void 1193 test_failover_ctrlr(void) 1194 { 1195 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}; 1196 struct spdk_nvme_ctrlr ctrlr = {}; 1197 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; 1198 struct nvme_bdev_ctrlr_trid *curr_trid, *next_trid; 1199 struct spdk_io_channel *ch1, *ch2; 1200 int rc; 1201 1202 ut_init_trid(&trid1); 1203 ut_init_trid2(&trid2); 1204 TAILQ_INIT(&ctrlr.active_io_qpairs); 1205 1206 set_thread(0); 1207 1208 nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); 1209 1210 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1211 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1212 1213 ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); 1214 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1215 1216 set_thread(1); 1217 1218 ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); 1219 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1220 1221 /* First, test one trid case. */ 1222 curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); 1223 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1224 1225 /* Failover starts from thread 1. */ 1226 set_thread(1); 1227 1228 /* Case 1: ctrlr is already being destructed. */ 1229 nvme_bdev_ctrlr->destruct = true; 1230 1231 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1232 CU_ASSERT(rc == 0); 1233 CU_ASSERT(curr_trid->is_failed == false); 1234 1235 /* Case 2: reset is in progress. 
*/ 1236 nvme_bdev_ctrlr->destruct = false; 1237 nvme_bdev_ctrlr->resetting = true; 1238 1239 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1240 CU_ASSERT(rc == 0); 1241 1242 /* Case 3: failover is in progress. */ 1243 nvme_bdev_ctrlr->failover_in_progress = true; 1244 1245 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1246 CU_ASSERT(rc == 0); 1247 CU_ASSERT(curr_trid->is_failed == false); 1248 1249 /* Case 4: reset completes successfully. */ 1250 nvme_bdev_ctrlr->resetting = false; 1251 nvme_bdev_ctrlr->failover_in_progress = false; 1252 1253 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1254 CU_ASSERT(rc == 0); 1255 1256 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1257 CU_ASSERT(curr_trid->is_failed == true); 1258 1259 poll_threads(); 1260 1261 curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); 1262 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1263 1264 CU_ASSERT(nvme_bdev_ctrlr->resetting == false); 1265 CU_ASSERT(curr_trid->is_failed == false); 1266 1267 set_thread(0); 1268 1269 /* Second, test two trids case. */ 1270 bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2, NULL); 1271 1272 curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); 1273 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1274 CU_ASSERT(&curr_trid->trid == nvme_bdev_ctrlr->connected_trid); 1275 CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0); 1276 1277 /* Failover starts from thread 1. */ 1278 set_thread(1); 1279 1280 /* Case 5: reset is in progress. */ 1281 nvme_bdev_ctrlr->resetting = true; 1282 1283 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1284 CU_ASSERT(rc == -EAGAIN); 1285 1286 /* Case 5: failover is in progress. */ 1287 nvme_bdev_ctrlr->failover_in_progress = true; 1288 1289 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1290 CU_ASSERT(rc == 0); 1291 1292 /* Case 6: failover completes successfully. 
*/ 1293 nvme_bdev_ctrlr->resetting = false; 1294 nvme_bdev_ctrlr->failover_in_progress = false; 1295 1296 rc = bdev_nvme_failover(nvme_bdev_ctrlr, false); 1297 CU_ASSERT(rc == 0); 1298 1299 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1300 CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == true); 1301 1302 next_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); 1303 SPDK_CU_ASSERT_FATAL(next_trid != NULL); 1304 CU_ASSERT(next_trid != curr_trid); 1305 CU_ASSERT(&next_trid->trid == nvme_bdev_ctrlr->connected_trid); 1306 CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0); 1307 1308 poll_threads(); 1309 1310 CU_ASSERT(nvme_bdev_ctrlr->resetting == false); 1311 CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == false); 1312 1313 spdk_put_io_channel(ch2); 1314 1315 set_thread(0); 1316 1317 spdk_put_io_channel(ch1); 1318 1319 poll_threads(); 1320 1321 rc = bdev_nvme_delete("nvme0", NULL); 1322 CU_ASSERT(rc == 0); 1323 1324 poll_threads(); 1325 1326 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1327 } 1328 1329 static void 1330 attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc) 1331 { 1332 CU_ASSERT(rc == g_ut_attach_ctrlr_status); 1333 CU_ASSERT(bdev_count == g_ut_attach_bdev_count); 1334 } 1335 1336 static void 1337 test_pending_reset(void) 1338 { 1339 struct spdk_nvme_transport_id trid = {}; 1340 struct spdk_nvme_host_id hostid = {}; 1341 struct spdk_nvme_ctrlr *ctrlr; 1342 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; 1343 const int STRING_SIZE = 32; 1344 const char *attached_names[STRING_SIZE]; 1345 struct spdk_bdev_io *first_bdev_io, *second_bdev_io; 1346 struct spdk_io_channel *ch1, *ch2; 1347 struct nvme_io_path *io_path1, *io_path2; 1348 int rc; 1349 1350 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1351 ut_init_trid(&trid); 1352 1353 first_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 1354 SPDK_CU_ASSERT_FATAL(first_bdev_io != NULL); 1355 first_bdev_io->internal.status = 
SPDK_BDEV_IO_STATUS_FAILED; 1356 1357 second_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 1358 SPDK_CU_ASSERT_FATAL(second_bdev_io != NULL); 1359 second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED; 1360 1361 set_thread(0); 1362 1363 ctrlr = ut_attach_ctrlr(&trid, 0); 1364 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1365 1366 g_ut_attach_ctrlr_status = 0; 1367 g_ut_attach_bdev_count = 0; 1368 1369 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 1370 attach_ctrlr_done, NULL, NULL); 1371 CU_ASSERT(rc == 0); 1372 1373 spdk_delay_us(1000); 1374 poll_threads(); 1375 1376 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1377 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1378 1379 ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); 1380 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1381 1382 io_path1 = spdk_io_channel_get_ctx(ch1); 1383 1384 set_thread(1); 1385 1386 ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); 1387 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1388 1389 io_path2 = spdk_io_channel_get_ctx(ch2); 1390 1391 /* The first reset request is submitted on thread 1, and the second reset request 1392 * is submitted on thread 0 while processing the first request. 
1393 */ 1394 rc = bdev_nvme_reset(io_path2, first_bdev_io); 1395 CU_ASSERT(rc == 0); 1396 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1397 CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets)); 1398 1399 set_thread(0); 1400 1401 rc = bdev_nvme_reset(io_path1, second_bdev_io); 1402 CU_ASSERT(rc == 0); 1403 CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io); 1404 1405 poll_threads(); 1406 1407 CU_ASSERT(nvme_bdev_ctrlr->resetting == false); 1408 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1409 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1410 1411 /* The first reset request is submitted on thread 1, and the second reset request 1412 * is submitted on thread 0 while processing the first request. 1413 * 1414 * The difference from the above scenario is that the controller is removed while 1415 * processing the first request. Hence both reset requests should fail. 1416 */ 1417 set_thread(1); 1418 1419 rc = bdev_nvme_reset(io_path2, first_bdev_io); 1420 CU_ASSERT(rc == 0); 1421 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1422 CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets)); 1423 1424 set_thread(0); 1425 1426 rc = bdev_nvme_reset(io_path1, second_bdev_io); 1427 CU_ASSERT(rc == 0); 1428 CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io); 1429 1430 ctrlr->fail_reset = true; 1431 1432 poll_threads(); 1433 1434 CU_ASSERT(nvme_bdev_ctrlr->resetting == false); 1435 CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1436 CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED); 1437 1438 spdk_put_io_channel(ch1); 1439 1440 set_thread(1); 1441 1442 spdk_put_io_channel(ch2); 1443 1444 poll_threads(); 1445 1446 set_thread(0); 1447 1448 rc = bdev_nvme_delete("nvme0", NULL); 1449 CU_ASSERT(rc == 0); 1450 1451 poll_threads(); 1452 1453 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1454 1455 free(first_bdev_io); 1456 free(second_bdev_io); 
1457 } 1458 1459 static void 1460 test_attach_ctrlr(void) 1461 { 1462 struct spdk_nvme_transport_id trid = {}; 1463 struct spdk_nvme_host_id hostid = {}; 1464 struct spdk_nvme_ctrlr *ctrlr; 1465 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; 1466 const int STRING_SIZE = 32; 1467 const char *attached_names[STRING_SIZE]; 1468 struct nvme_bdev *nbdev; 1469 int rc; 1470 1471 set_thread(0); 1472 1473 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1474 ut_init_trid(&trid); 1475 1476 /* If ctrlr fails, no nvme_bdev_ctrlr is created. Failed ctrlr is removed 1477 * by probe polling. 1478 */ 1479 ctrlr = ut_attach_ctrlr(&trid, 0); 1480 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1481 1482 ctrlr->is_failed = true; 1483 g_ut_attach_ctrlr_status = -EIO; 1484 g_ut_attach_bdev_count = 0; 1485 1486 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 1487 attach_ctrlr_done, NULL, NULL); 1488 CU_ASSERT(rc == 0); 1489 1490 spdk_delay_us(1000); 1491 poll_threads(); 1492 1493 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1494 1495 /* If ctrlr has no namespace, one nvme_bdev_ctrlr with no namespace is created */ 1496 ctrlr = ut_attach_ctrlr(&trid, 0); 1497 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1498 1499 g_ut_attach_ctrlr_status = 0; 1500 1501 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 1502 attach_ctrlr_done, NULL, NULL); 1503 CU_ASSERT(rc == 0); 1504 1505 spdk_delay_us(1000); 1506 poll_threads(); 1507 1508 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1509 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1510 CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr); 1511 CU_ASSERT(nvme_bdev_ctrlr->num_ns == 0); 1512 1513 rc = bdev_nvme_delete("nvme0", NULL); 1514 CU_ASSERT(rc == 0); 1515 1516 poll_threads(); 1517 1518 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1519 1520 /* If ctrlr has one namespace, one nvme_bdev_ctrlr with one namespace and 1521 * one nvme_bdev is created. 
1522 */ 1523 ctrlr = ut_attach_ctrlr(&trid, 1); 1524 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1525 1526 g_ut_attach_bdev_count = 1; 1527 1528 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 1529 attach_ctrlr_done, NULL, NULL); 1530 CU_ASSERT(rc == 0); 1531 1532 spdk_delay_us(1000); 1533 poll_threads(); 1534 1535 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1536 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1537 CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr); 1538 CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1); 1539 1540 CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0); 1541 attached_names[0] = NULL; 1542 1543 nbdev = nvme_bdev_ctrlr->namespaces[0]->bdev; 1544 SPDK_CU_ASSERT_FATAL(nbdev != NULL); 1545 CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr); 1546 1547 rc = bdev_nvme_delete("nvme0", NULL); 1548 CU_ASSERT(rc == 0); 1549 1550 poll_threads(); 1551 1552 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1553 1554 /* Ctrlr has one namespace but one nvme_bdev_ctrlr with no namespace is 1555 * created because creating one nvme_bdev failed. 
1556 */ 1557 ctrlr = ut_attach_ctrlr(&trid, 1); 1558 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1559 1560 g_ut_register_bdev_status = -EINVAL; 1561 g_ut_attach_bdev_count = 0; 1562 1563 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 1564 attach_ctrlr_done, NULL, NULL); 1565 CU_ASSERT(rc == 0); 1566 1567 spdk_delay_us(1000); 1568 poll_threads(); 1569 1570 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1571 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1572 CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr); 1573 CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1); 1574 1575 CU_ASSERT(attached_names[0] == NULL); 1576 1577 rc = bdev_nvme_delete("nvme0", NULL); 1578 CU_ASSERT(rc == 0); 1579 1580 poll_threads(); 1581 1582 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1583 1584 g_ut_register_bdev_status = 0; 1585 } 1586 1587 static void 1588 test_reconnect_qpair(void) 1589 { 1590 struct spdk_nvme_transport_id trid = {}; 1591 struct spdk_nvme_ctrlr ctrlr = {}; 1592 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; 1593 struct spdk_io_channel *ch; 1594 struct nvme_io_path *io_path; 1595 int rc; 1596 1597 set_thread(0); 1598 1599 ut_init_trid(&trid); 1600 TAILQ_INIT(&ctrlr.active_io_qpairs); 1601 1602 nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 1603 1604 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1605 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1606 1607 ch = spdk_get_io_channel(nvme_bdev_ctrlr); 1608 SPDK_CU_ASSERT_FATAL(ch != NULL); 1609 1610 io_path = spdk_io_channel_get_ctx(ch); 1611 CU_ASSERT(io_path->qpair != NULL); 1612 CU_ASSERT(io_path->group != NULL); 1613 CU_ASSERT(io_path->group->group != NULL); 1614 CU_ASSERT(io_path->group->poller != NULL); 1615 1616 /* Test if the disconnected qpair is reconnected. */ 1617 io_path->qpair->is_connected = false; 1618 1619 poll_threads(); 1620 1621 CU_ASSERT(io_path->qpair->is_connected == true); 1622 1623 /* If the ctrlr is failed, reconnecting qpair should fail too. 
*/ 1624 io_path->qpair->is_connected = false; 1625 ctrlr.is_failed = true; 1626 1627 poll_threads(); 1628 1629 CU_ASSERT(io_path->qpair->is_connected == false); 1630 1631 spdk_put_io_channel(ch); 1632 1633 poll_threads(); 1634 1635 rc = bdev_nvme_delete("nvme0", NULL); 1636 CU_ASSERT(rc == 0); 1637 1638 poll_threads(); 1639 1640 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1641 } 1642 1643 static void 1644 test_aer_cb(void) 1645 { 1646 struct spdk_nvme_transport_id trid = {}; 1647 struct spdk_nvme_host_id hostid = {}; 1648 struct spdk_nvme_ctrlr *ctrlr; 1649 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; 1650 struct nvme_bdev *bdev; 1651 const int STRING_SIZE = 32; 1652 const char *attached_names[STRING_SIZE]; 1653 union spdk_nvme_async_event_completion event = {}; 1654 struct spdk_nvme_cpl cpl = {}; 1655 int rc; 1656 1657 set_thread(0); 1658 1659 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1660 ut_init_trid(&trid); 1661 1662 /* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th 1663 * namespaces are populated. 
1664 */ 1665 ctrlr = ut_attach_ctrlr(&trid, 4); 1666 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1667 1668 ctrlr->ns[0].is_active = false; 1669 1670 g_ut_attach_ctrlr_status = 0; 1671 g_ut_attach_bdev_count = 3; 1672 1673 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 1674 attach_ctrlr_done, NULL, NULL); 1675 CU_ASSERT(rc == 0); 1676 1677 spdk_delay_us(1000); 1678 poll_threads(); 1679 1680 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1681 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1682 1683 CU_ASSERT(nvme_bdev_ctrlr->num_ns == 4); 1684 CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == false); 1685 CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true); 1686 CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == true); 1687 CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true); 1688 1689 bdev = nvme_bdev_ctrlr->namespaces[3]->bdev; 1690 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1691 CU_ASSERT(bdev->disk.blockcnt == 1024); 1692 1693 /* Dynamically populate 1st namespace and depopulate 3rd namespace, and 1694 * change the size of the 4th namespace. 
1695 */ 1696 ctrlr->ns[0].is_active = true; 1697 ctrlr->ns[2].is_active = false; 1698 ctrlr->nsdata[3].nsze = 2048; 1699 1700 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE; 1701 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED; 1702 cpl.cdw0 = event.raw; 1703 1704 aer_cb(nvme_bdev_ctrlr, &cpl); 1705 1706 CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == true); 1707 CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true); 1708 CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == false); 1709 CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true); 1710 CU_ASSERT(bdev->disk.blockcnt == 2048); 1711 1712 rc = bdev_nvme_delete("nvme0", NULL); 1713 CU_ASSERT(rc == 0); 1714 1715 poll_threads(); 1716 1717 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1718 } 1719 1720 static void 1721 ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 1722 enum spdk_bdev_io_type io_type) 1723 { 1724 struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch); 1725 struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt; 1726 struct spdk_nvme_ns *ns = NULL; 1727 struct spdk_nvme_qpair *qpair = NULL; 1728 1729 CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair)); 1730 1731 bdev_io->type = io_type; 1732 bdev_io->internal.in_submit_request = true; 1733 1734 bdev_nvme_submit_request(ch, bdev_io); 1735 1736 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1737 CU_ASSERT(qpair->num_outstanding_reqs == 1); 1738 1739 poll_threads(); 1740 1741 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1742 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1743 CU_ASSERT(qpair->num_outstanding_reqs == 0); 1744 } 1745 1746 static void 1747 ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 1748 enum spdk_bdev_io_type io_type) 1749 { 1750 struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch); 1751 struct nvme_bdev *nbdev = (struct 
nvme_bdev *)bdev_io->bdev->ctxt; 1752 struct spdk_nvme_ns *ns = NULL; 1753 struct spdk_nvme_qpair *qpair = NULL; 1754 1755 CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair)); 1756 1757 bdev_io->type = io_type; 1758 bdev_io->internal.in_submit_request = true; 1759 1760 bdev_nvme_submit_request(ch, bdev_io); 1761 1762 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1763 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1764 CU_ASSERT(qpair->num_outstanding_reqs == 0); 1765 } 1766 1767 static void 1768 ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io) 1769 { 1770 struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch); 1771 struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx; 1772 struct ut_nvme_req *req; 1773 struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt; 1774 struct spdk_nvme_ns *ns = NULL; 1775 struct spdk_nvme_qpair *qpair = NULL; 1776 1777 CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair)); 1778 1779 /* Only compare and write now. */ 1780 bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE; 1781 bdev_io->internal.in_submit_request = true; 1782 1783 bdev_nvme_submit_request(ch, bdev_io); 1784 1785 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1786 CU_ASSERT(qpair->num_outstanding_reqs == 2); 1787 CU_ASSERT(bio->first_fused_submitted == true); 1788 1789 /* First outstanding request is compare operation. 
*/ 1790 req = TAILQ_FIRST(&io_path->qpair->outstanding_reqs); 1791 SPDK_CU_ASSERT_FATAL(req != NULL); 1792 CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE); 1793 req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE; 1794 1795 poll_threads(); 1796 1797 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1798 CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS); 1799 CU_ASSERT(qpair->num_outstanding_reqs == 0); 1800 } 1801 1802 static void 1803 ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, 1804 struct spdk_nvme_ctrlr *ctrlr) 1805 { 1806 bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 1807 bdev_io->internal.in_submit_request = true; 1808 bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 1809 1810 bdev_nvme_submit_request(ch, bdev_io); 1811 1812 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1813 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1); 1814 1815 spdk_delay_us(10000); 1816 poll_thread_times(1, 1); 1817 1818 CU_ASSERT(bdev_io->internal.in_submit_request == true); 1819 CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0); 1820 1821 poll_thread_times(0, 1); 1822 1823 CU_ASSERT(bdev_io->internal.in_submit_request == false); 1824 } 1825 1826 static void 1827 test_submit_nvme_cmd(void) 1828 { 1829 struct spdk_nvme_transport_id trid = {}; 1830 struct spdk_nvme_host_id hostid = {}; 1831 struct spdk_nvme_ctrlr *ctrlr; 1832 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; 1833 const int STRING_SIZE = 32; 1834 const char *attached_names[STRING_SIZE]; 1835 struct nvme_bdev *bdev; 1836 struct spdk_bdev_io *bdev_io; 1837 struct spdk_io_channel *ch; 1838 int rc; 1839 1840 memset(attached_names, 0, sizeof(char *) * STRING_SIZE); 1841 ut_init_trid(&trid); 1842 1843 set_thread(1); 1844 1845 ctrlr = ut_attach_ctrlr(&trid, 1); 1846 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 1847 1848 g_ut_attach_ctrlr_status = 0; 1849 g_ut_attach_bdev_count = 1; 1850 1851 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 
0, 1852 attach_ctrlr_done, NULL, NULL); 1853 CU_ASSERT(rc == 0); 1854 1855 spdk_delay_us(1000); 1856 poll_threads(); 1857 1858 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1859 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1860 1861 bdev = nvme_bdev_ctrlr->namespaces[0]->bdev; 1862 SPDK_CU_ASSERT_FATAL(bdev != NULL); 1863 1864 set_thread(0); 1865 1866 ch = spdk_get_io_channel(nvme_bdev_ctrlr); 1867 SPDK_CU_ASSERT_FATAL(ch != NULL); 1868 1869 bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 1870 SPDK_CU_ASSERT_FATAL(bdev_io != NULL); 1871 bdev_io->bdev = &bdev->disk; 1872 bdev_io->internal.ch = (struct spdk_bdev_channel *)ch; 1873 1874 bdev_io->u.bdev.iovs = NULL; 1875 1876 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 1877 1878 ut_bdev_io_set_buf(bdev_io); 1879 1880 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ); 1881 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE); 1882 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE); 1883 ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP); 1884 1885 ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH); 1886 1887 ut_test_submit_fused_nvme_cmd(ch, bdev_io); 1888 1889 ut_test_submit_admin_cmd(ch, bdev_io, ctrlr); 1890 1891 free(bdev_io); 1892 1893 spdk_put_io_channel(ch); 1894 1895 poll_threads(); 1896 1897 set_thread(1); 1898 1899 rc = bdev_nvme_delete("nvme0", NULL); 1900 CU_ASSERT(rc == 0); 1901 1902 poll_threads(); 1903 1904 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1905 } 1906 1907 static void 1908 test_remove_trid(void) 1909 { 1910 struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {}; 1911 struct spdk_nvme_ctrlr ctrlr = {}; 1912 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; 1913 struct nvme_bdev_ctrlr_trid *ctrid; 1914 int rc; 1915 1916 ut_init_trid(&trid1); 1917 ut_init_trid2(&trid2); 1918 ut_init_trid3(&trid3); 1919 1920 set_thread(0); 1921 1922 nvme_bdev_ctrlr_create(&ctrlr, 
"nvme0", &trid1, 0, NULL); 1923 1924 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1925 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1926 1927 bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2, NULL); 1928 1929 /* trid3 is not in the registered list. */ 1930 rc = bdev_nvme_delete("nvme0", &trid3); 1931 CU_ASSERT(rc == -ENXIO); 1932 1933 /* trid2 is not used, and simply removed. */ 1934 rc = bdev_nvme_delete("nvme0", &trid2); 1935 CU_ASSERT(rc == 0); 1936 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); 1937 TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) { 1938 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0); 1939 } 1940 1941 bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid3, NULL); 1942 1943 /* trid1 is currently used and trid3 is an alternative path. 1944 * If we remove trid1, path is changed to trid3. 1945 */ 1946 rc = bdev_nvme_delete("nvme0", &trid1); 1947 CU_ASSERT(rc == 0); 1948 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); 1949 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1950 TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) { 1951 CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0); 1952 } 1953 CU_ASSERT(spdk_nvme_transport_id_compare(nvme_bdev_ctrlr->connected_trid, &trid3) == 0); 1954 1955 poll_threads(); 1956 1957 CU_ASSERT(nvme_bdev_ctrlr->resetting == false); 1958 1959 /* trid3 is the current and only path. If we remove trid3, the corresponding 1960 * nvme_bdev_ctrlr is removed. 
1961 */ 1962 rc = bdev_nvme_delete("nvme0", &trid3); 1963 CU_ASSERT(rc == 0); 1964 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); 1965 1966 poll_threads(); 1967 1968 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1969 1970 nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL); 1971 1972 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1973 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1974 1975 bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2, NULL); 1976 1977 /* If trid is not specified, nvme_bdev_ctrlr itself is removed. */ 1978 rc = bdev_nvme_delete("nvme0", NULL); 1979 CU_ASSERT(rc == 0); 1980 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr); 1981 1982 poll_threads(); 1983 1984 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1985 } 1986 1987 static void 1988 test_abort(void) 1989 { 1990 struct spdk_nvme_transport_id trid = {}; 1991 struct spdk_nvme_host_id hostid = {}; 1992 struct spdk_nvme_ctrlr *ctrlr; 1993 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr; 1994 const int STRING_SIZE = 32; 1995 const char *attached_names[STRING_SIZE]; 1996 struct nvme_bdev *bdev; 1997 struct spdk_bdev_io *write_io, *admin_io, *abort_io; 1998 struct spdk_io_channel *ch1, *ch2; 1999 struct nvme_io_path *io_path1; 2000 int rc; 2001 2002 /* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on 2003 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests 2004 * are submitted on thread 1. Both should succeed. 
2005 */ 2006 2007 ut_init_trid(&trid); 2008 2009 ctrlr = ut_attach_ctrlr(&trid, 1); 2010 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2011 2012 g_ut_attach_ctrlr_status = 0; 2013 g_ut_attach_bdev_count = 1; 2014 2015 set_thread(1); 2016 2017 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 2018 attach_ctrlr_done, NULL, NULL); 2019 CU_ASSERT(rc == 0); 2020 2021 spdk_delay_us(1000); 2022 poll_threads(); 2023 2024 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 2025 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 2026 2027 bdev = nvme_bdev_ctrlr->namespaces[0]->bdev; 2028 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2029 2030 write_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 2031 SPDK_CU_ASSERT_FATAL(write_io != NULL); 2032 write_io->bdev = &bdev->disk; 2033 write_io->type = SPDK_BDEV_IO_TYPE_WRITE; 2034 ut_bdev_io_set_buf(write_io); 2035 2036 admin_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 2037 SPDK_CU_ASSERT_FATAL(admin_io != NULL); 2038 admin_io->bdev = &bdev->disk; 2039 admin_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2040 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2041 2042 abort_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 2043 SPDK_CU_ASSERT_FATAL(abort_io != NULL); 2044 abort_io->bdev = &bdev->disk; 2045 abort_io->type = SPDK_BDEV_IO_TYPE_ABORT; 2046 2047 set_thread(0); 2048 2049 ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); 2050 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2051 io_path1 = spdk_io_channel_get_ctx(ch1); 2052 2053 set_thread(1); 2054 2055 ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); 2056 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2057 2058 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2059 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2060 2061 /* Aborting the already completed request should fail. 
 */
	/* Submit a write while the abort path is expected to fail (continuation of the
	 * scenario set up above this chunk). poll_threads() drains the simulated threads. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	/* The write already completed, so aborting it must fail and leave no
	 * outstanding admin requests behind. */
	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Repeat the same "too late to abort" check for an admin request, with the
	 * abort submitted from a different channel (ch2) than the admin I/O (ch1). */
	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed. */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	/* The write is still outstanding on the qpair when the abort arrives. */
	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	/* Abort is submitted from ch2 while the admin I/O was submitted via ch1 —
	 * exercises the cross-channel abort path. */
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Tear down: each channel must be released on the thread that acquired it. */
	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	free(write_io);
	free(admin_io);
	free(abort_io);

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Verify that bdev_nvme_get_io_qpair() returns the qpair held by the
 * nvme_io_path context of the controller's I/O channel. */
static void
test_get_io_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_io_path *io_path;
	struct spdk_nvme_qpair *qpair;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	/* Getting the channel allocates an I/O qpair for the path. */
	ch = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);
	io_path = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(io_path->qpair != NULL);

	qpair = bdev_nvme_get_io_qpair(ch);
	CU_ASSERT(qpair == io_path->qpair);

	spdk_put_io_channel(ch);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Test a scenario that the bdev subsystem starts shutdown when there still exists
 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a
 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls
 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly.
 */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	struct nvme_bdev_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a controller exposing two namespaces, i.e. two bdevs. */
	ctrlr = ut_attach_ctrlr(&trid, 2);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	nvme_ns1 = nvme_bdev_ctrlr->namespaces[0];
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_bdev_ctrlr->namespaces[1];
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	/* Destruct the bdevs first, as spdk_bdev_unregister() would during shutdown. */
	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	/* Then destruct the controller itself; it must clean up fully even though
	 * its bdevs were already gone. */
	nvme_bdev_ctrlr->destruct = true;
	_nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Verify bdev_nvme_compare_ns() for every combination of the three namespace
 * identifiers (EUI64, NGUID, UUID): absent, mismatched, and matched. */
static void
test_compare_ns(void)
{
	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, }, ns2 = { .id = 1, .ctrlr = &ctrlr2, };

	/* No IDs are defined. */
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only EUI64 are defined and not matched. */
	nsdata1.eui64 = 0xABCDEF0123456789;
	nsdata2.eui64 = 0xBBCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only EUI64 are defined and matched. */
	nsdata2.eui64 = 0xABCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only NGUID are defined and not matched. */
	nsdata1.eui64 = 0x0;
	nsdata2.eui64 = 0x0;
	nsdata1.nguid[0] = 0x12;
	nsdata2.nguid[0] = 0x10;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only NGUID are defined and matched. */
	nsdata2.nguid[0] = 0x12;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only UUID are defined and not matched. */
	nsdata1.nguid[0] = 0x0;
	nsdata2.nguid[0] = 0x0;
	ns1.uuid.u.raw[0] = 0xAA;
	ns2.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only UUID are defined and matched. */
	ns1.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* All EUI64, NGUID, and UUID are defined and matched.
	 */
	nsdata1.eui64 = 0x123456789ABCDEF;
	nsdata2.eui64 = 0x123456789ABCDEF;
	nsdata1.nguid[15] = 0x34;
	nsdata2.nguid[15] = 0x34;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
}

/* Register the fake accel engine io_device (g_accel_p) so that
 * spdk_accel_engine_get_io_channel() in the stub above can hand out channels. */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}

/* Unregister the fake accel engine io_device. */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

int
main(int argc, const char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	/* Three simulated threads: thread 0 runs init/fini; tests switch among
	 * them with set_thread(). */
	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}