1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "spdk/stdinc.h" 35 #include "spdk_cunit.h" 36 #include "spdk/thread.h" 37 #include "spdk/bdev_module.h" 38 #include "spdk/bdev_module.h" 39 40 #include "common/lib/ut_multithread.c" 41 42 #include "bdev/nvme/bdev_nvme.c" 43 #include "bdev/nvme/common.c" 44 45 #include "unit/lib/json_mock.c" 46 47 static void *g_accel_p = (void *)0xdeadbeaf; 48 49 DEFINE_STUB(spdk_nvme_probe_async, struct spdk_nvme_probe_ctx *, 50 (const struct spdk_nvme_transport_id *trid, void *cb_ctx, 51 spdk_nvme_probe_cb probe_cb, spdk_nvme_attach_cb attach_cb, 52 spdk_nvme_remove_cb remove_cb), NULL); 53 54 DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid, 55 enum spdk_nvme_transport_type trtype)); 56 57 DEFINE_STUB(spdk_nvme_transport_id_trtype_str, const char *, (enum spdk_nvme_transport_type trtype), 58 NULL); 59 60 DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL); 61 62 DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts, 63 size_t opts_size)); 64 65 DEFINE_STUB(spdk_nvme_ctrlr_set_trid, int, (struct spdk_nvme_ctrlr *ctrlr, 66 struct spdk_nvme_transport_id *trid), 0); 67 68 DEFINE_STUB_V(spdk_nvme_ctrlr_set_remove_cb, (struct spdk_nvme_ctrlr *ctrlr, 69 spdk_nvme_remove_cb remove_cb, void *remove_ctx)); 70 71 DEFINE_STUB(spdk_nvme_ctrlr_get_flags, uint64_t, (struct spdk_nvme_ctrlr *ctrlr), 0); 72 73 DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0); 74 DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf)); 75 76 struct spdk_io_channel * 77 spdk_accel_engine_get_io_channel(void) 78 { 79 return spdk_get_io_channel(g_accel_p); 80 } 81 82 void 83 spdk_nvme_ctrlr_get_default_io_qpair_opts(struct spdk_nvme_ctrlr *ctrlr, 84 struct spdk_nvme_io_qpair_opts *opts, size_t opts_size) 85 { 86 /* Avoid warning that opts is used uninitialised */ 87 memset(opts, 0, opts_size); 88 } 89 90 
/* Stubbed controller accessors — return values are irrelevant to these tests. */
DEFINE_STUB(spdk_nvme_ctrlr_get_max_xfer_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_ctrlr_get_transport_id, const struct spdk_nvme_transport_id *,
	    (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_nvme_ctrlr_register_aer_callback, (struct spdk_nvme_ctrlr *ctrlr,
		spdk_nvme_aer_cb aer_cb_fn, void *aer_cb_arg));

DEFINE_STUB_V(spdk_nvme_ctrlr_register_timeout_callback, (struct spdk_nvme_ctrlr *ctrlr,
		uint64_t timeout_us, spdk_nvme_timeout_cb cb_fn, void *cb_arg));

DEFINE_STUB(spdk_nvme_ctrlr_is_ocssd_supported, bool, (struct spdk_nvme_ctrlr *ctrlr), false);

/* Stubbed raw/admin command submission — always report success without queueing. */
DEFINE_STUB(spdk_nvme_ctrlr_cmd_abort, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, uint16_t cid, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_ctrlr_cmd_io_raw_with_md, int, (struct spdk_nvme_ctrlr *ctrlr,
		struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd, void *buf,
		uint32_t len, void *md_buf, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Stubbed namespace property accessors. */
DEFINE_STUB(spdk_nvme_ns_get_max_io_xfer_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_extended_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_sector_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_pi_type, enum spdk_nvme_pi_type, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_supports_compare, bool, (struct spdk_nvme_ns *ns), false);

DEFINE_STUB(spdk_nvme_ns_get_md_size, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_dealloc_logical_block_read_value,
	    enum spdk_nvme_dealloc_logical_block_read_value, (struct spdk_nvme_ns *ns),
	    0);

DEFINE_STUB(spdk_nvme_ns_get_optimal_io_boundary, uint32_t, (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_ana_state, enum spdk_nvme_ana_state,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_ns_get_csi, enum spdk_nvme_csi,
	    (const struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_cuse_get_ns_name, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
		char *name, size_t *size), 0);

/* Stubbed Zoned Namespace (ZNS) API — these tests never exercise zoned bdevs. */
DEFINE_STUB(spdk_nvme_zns_ns_get_zone_size_sectors, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ctrlr_get_max_zone_append_size, uint32_t,
	    (const struct spdk_nvme_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_open_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_max_active_zones, uint32_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_ns_get_num_zones, uint64_t,
	    (struct spdk_nvme_ns *ns), 0);

DEFINE_STUB(spdk_nvme_zns_zone_append_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, void *metadata,
	     uint64_t zslba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
	     uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_zone_appendv_with_md, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t zslba,
	     uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
	     spdk_nvme_req_reset_sgl_cb reset_sgl_fn, spdk_nvme_req_next_sge_cb next_sge_fn,
	     void *metadata, uint16_t apptag_mask, uint16_t apptag), 0);

DEFINE_STUB(spdk_nvme_zns_report_zones, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     void *payload, uint32_t payload_size, uint64_t slba,
	     enum spdk_nvme_zns_zra_report_opts report_opts, bool partial_report,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_close_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_finish_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_open_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_reset_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

DEFINE_STUB(spdk_nvme_zns_offline_zone, int,
	    (struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, uint64_t slba,
	     bool select_all, spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);

/* Stubbed bdev-module and OPAL hooks. */
DEFINE_STUB_V(spdk_bdev_module_finish_done, (void));

DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));

DEFINE_STUB(spdk_opal_dev_construct, struct spdk_opal_dev *, (struct spdk_nvme_ctrlr *ctrlr), NULL);

DEFINE_STUB_V(spdk_opal_dev_destruct, (struct spdk_opal_dev *dev));

/* Stubbed Open-Channel SSD (OCSSD) integration points. */
DEFINE_STUB_V(bdev_ocssd_populate_namespace, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr,
		struct nvme_bdev_ns *nvme_ns, struct nvme_async_probe_ctx *ctx));

DEFINE_STUB_V(bdev_ocssd_depopulate_namespace, (struct nvme_bdev_ns *nvme_ns));

DEFINE_STUB_V(bdev_ocssd_namespace_config_json, (struct spdk_json_write_ctx *w,
		struct nvme_bdev_ns *nvme_ns));

DEFINE_STUB(bdev_ocssd_create_io_channel, int, (struct nvme_io_path *ioch), 0);

DEFINE_STUB_V(bdev_ocssd_destroy_io_channel, (struct nvme_io_path *ioch));

DEFINE_STUB(bdev_ocssd_init_ctrlr, int, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr), 0);

DEFINE_STUB_V(bdev_ocssd_fini_ctrlr, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr));
DEFINE_STUB_V(bdev_ocssd_handle_chunk_notification, (struct nvme_bdev_ctrlr *nvme_bdev_ctrlr));

DEFINE_STUB(spdk_accel_submit_crc32cv, int, (struct spdk_io_channel *ch, uint32_t *dst,
		struct iovec *iov,
		uint32_t iov_cnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg), 0);


/* One queued fake NVMe command; completed (cb_fn invoked with cpl) when the
 * owning qpair is polled via spdk_nvme_qpair_process_completions().
 */
struct ut_nvme_req {
	uint16_t			opc;
	spdk_nvme_cmd_cb		cb_fn;
	void				*cb_arg;
	struct spdk_nvme_cpl		cpl;
	TAILQ_ENTRY(ut_nvme_req)	tailq;
};

/* Minimal fakes of the normally-opaque NVMe driver types, so the tests can
 * construct controllers/namespaces/qpairs directly and inspect their state.
 */
struct spdk_nvme_ns {
	struct spdk_nvme_ctrlr		*ctrlr;		/* back-pointer to owning controller */
	uint32_t			id;		/* 1-based namespace ID */
	bool				is_active;
	struct spdk_uuid		uuid;
};

struct spdk_nvme_qpair {
	struct spdk_nvme_ctrlr		*ctrlr;
	bool				is_connected;
	TAILQ_HEAD(, ut_nvme_req)	outstanding_reqs;	/* requests awaiting completion */
	uint32_t			num_outstanding_reqs;
	TAILQ_ENTRY(spdk_nvme_qpair)	poll_group_tailq;	/* link on poll_group->qpairs */
	struct spdk_nvme_poll_group	*poll_group;
	TAILQ_ENTRY(spdk_nvme_qpair)	tailq;			/* link on ctrlr->active_io_qpairs */
};

struct spdk_nvme_ctrlr {
	uint32_t			num_ns;		/* length of ns[] and nsdata[] */
	struct spdk_nvme_ns		*ns;		/* array indexed by nsid - 1 */
	struct spdk_nvme_ns_data	*nsdata;	/* parallel array of ns data */
	struct spdk_nvme_qpair		adminq;
	struct spdk_nvme_ctrlr_data	cdata;
	bool				attached;
	bool				is_failed;
	bool				fail_reset;	/* test knob: make reset return -EIO */
	struct spdk_nvme_transport_id	trid;
	TAILQ_HEAD(, spdk_nvme_qpair)	active_io_qpairs;
	TAILQ_ENTRY(spdk_nvme_ctrlr)	tailq;		/* link on g_ut_init/attached_ctrlrs */
	struct spdk_nvme_ctrlr_opts	opts;
};

struct spdk_nvme_poll_group {
	void				*ctx;
	struct spdk_nvme_accel_fn_table	accel_fn_table;
	TAILQ_HEAD(, spdk_nvme_qpair)	qpairs;
};

struct spdk_nvme_probe_ctx {
	struct spdk_nvme_transport_id	trid;
	void				*cb_ctx;
	spdk_nvme_attach_cb		attach_cb;
	struct spdk_nvme_ctrlr		*init_ctrlr;
};

/* Controllers created by ut_attach_ctrlr() but not yet probed. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_init_ctrlrs = TAILQ_HEAD_INITIALIZER(g_ut_init_ctrlrs);
/* Controllers that completed the fake probe/attach sequence. */
static TAILQ_HEAD(, spdk_nvme_ctrlr) g_ut_attached_ctrlrs = TAILQ_HEAD_INITIALIZER(
			g_ut_attached_ctrlrs);
static int g_ut_attach_ctrlr_status;
static size_t g_ut_attach_bdev_count;
static int g_ut_register_bdev_status; 283 284 static void 285 ut_init_trid(struct spdk_nvme_transport_id *trid) 286 { 287 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 288 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 289 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8"); 290 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 291 } 292 293 static void 294 ut_init_trid2(struct spdk_nvme_transport_id *trid) 295 { 296 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 297 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 298 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9"); 299 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 300 } 301 302 static void 303 ut_init_trid3(struct spdk_nvme_transport_id *trid) 304 { 305 trid->trtype = SPDK_NVME_TRANSPORT_TCP; 306 snprintf(trid->subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1"); 307 snprintf(trid->traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.10"); 308 snprintf(trid->trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420"); 309 } 310 311 static int 312 cmp_int(int a, int b) 313 { 314 return a - b; 315 } 316 317 int 318 spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1, 319 const struct spdk_nvme_transport_id *trid2) 320 { 321 int cmp; 322 323 /* We assume trtype is TCP for now. 
*/ 324 CU_ASSERT(trid1->trtype == SPDK_NVME_TRANSPORT_TCP); 325 326 cmp = cmp_int(trid1->trtype, trid2->trtype); 327 if (cmp) { 328 return cmp; 329 } 330 331 cmp = strcasecmp(trid1->traddr, trid2->traddr); 332 if (cmp) { 333 return cmp; 334 } 335 336 cmp = cmp_int(trid1->adrfam, trid2->adrfam); 337 if (cmp) { 338 return cmp; 339 } 340 341 cmp = strcasecmp(trid1->trsvcid, trid2->trsvcid); 342 if (cmp) { 343 return cmp; 344 } 345 346 cmp = strcmp(trid1->subnqn, trid2->subnqn); 347 if (cmp) { 348 return cmp; 349 } 350 351 return 0; 352 } 353 354 static struct spdk_nvme_ctrlr * 355 ut_attach_ctrlr(const struct spdk_nvme_transport_id *trid, uint32_t num_ns) 356 { 357 struct spdk_nvme_ctrlr *ctrlr; 358 uint32_t i; 359 360 TAILQ_FOREACH(ctrlr, &g_ut_init_ctrlrs, tailq) { 361 if (spdk_nvme_transport_id_compare(&ctrlr->trid, trid) == 0) { 362 /* There is a ctrlr whose trid matches. */ 363 return NULL; 364 } 365 } 366 367 ctrlr = calloc(1, sizeof(*ctrlr)); 368 if (ctrlr == NULL) { 369 return NULL; 370 } 371 372 ctrlr->attached = true; 373 ctrlr->adminq.ctrlr = ctrlr; 374 TAILQ_INIT(&ctrlr->adminq.outstanding_reqs); 375 376 if (num_ns != 0) { 377 ctrlr->num_ns = num_ns; 378 ctrlr->ns = calloc(num_ns, sizeof(struct spdk_nvme_ns)); 379 if (ctrlr->ns == NULL) { 380 free(ctrlr); 381 return NULL; 382 } 383 384 ctrlr->nsdata = calloc(num_ns, sizeof(struct spdk_nvme_ns_data)); 385 if (ctrlr->nsdata == NULL) { 386 free(ctrlr->ns); 387 free(ctrlr); 388 return NULL; 389 } 390 391 for (i = 0; i < num_ns; i++) { 392 ctrlr->ns[i].id = i + 1; 393 ctrlr->ns[i].ctrlr = ctrlr; 394 ctrlr->ns[i].is_active = true; 395 ctrlr->nsdata[i].nsze = 1024; 396 } 397 } 398 399 ctrlr->trid = *trid; 400 TAILQ_INIT(&ctrlr->active_io_qpairs); 401 402 TAILQ_INSERT_TAIL(&g_ut_init_ctrlrs, ctrlr, tailq); 403 404 return ctrlr; 405 } 406 407 static void 408 ut_detach_ctrlr(struct spdk_nvme_ctrlr *ctrlr) 409 { 410 CU_ASSERT(TAILQ_EMPTY(&ctrlr->active_io_qpairs)); 411 412 TAILQ_REMOVE(&g_ut_attached_ctrlrs, ctrlr, 
tailq); 413 free(ctrlr->nsdata); 414 free(ctrlr->ns); 415 free(ctrlr); 416 } 417 418 static int 419 ut_submit_nvme_request(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 420 uint16_t opc, spdk_nvme_cmd_cb cb_fn, void *cb_arg) 421 { 422 struct ut_nvme_req *req; 423 424 req = calloc(1, sizeof(*req)); 425 if (req == NULL) { 426 return -ENOMEM; 427 } 428 429 req->opc = opc; 430 req->cb_fn = cb_fn; 431 req->cb_arg = cb_arg; 432 433 req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 434 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 435 436 TAILQ_INSERT_TAIL(&qpair->outstanding_reqs, req, tailq); 437 qpair->num_outstanding_reqs++; 438 439 return 0; 440 } 441 442 static void 443 ut_bdev_io_set_buf(struct spdk_bdev_io *bdev_io) 444 { 445 bdev_io->u.bdev.iovs = &bdev_io->iov; 446 bdev_io->u.bdev.iovcnt = 1; 447 448 bdev_io->iov.iov_base = (void *)0xFEEDBEEF; 449 bdev_io->iov.iov_len = 4096; 450 } 451 452 static void 453 nvme_ctrlr_poll_internal(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_probe_ctx *probe_ctx) 454 { 455 if (ctrlr->is_failed) { 456 free(ctrlr); 457 return; 458 } 459 460 TAILQ_INSERT_TAIL(&g_ut_attached_ctrlrs, ctrlr, tailq); 461 462 if (probe_ctx->attach_cb) { 463 probe_ctx->attach_cb(probe_ctx->cb_ctx, &ctrlr->trid, ctrlr, &ctrlr->opts); 464 } 465 } 466 467 int 468 spdk_nvme_probe_poll_async(struct spdk_nvme_probe_ctx *probe_ctx) 469 { 470 struct spdk_nvme_ctrlr *ctrlr, *tmp; 471 472 TAILQ_FOREACH_SAFE(ctrlr, &g_ut_init_ctrlrs, tailq, tmp) { 473 if (spdk_nvme_transport_id_compare(&ctrlr->trid, &probe_ctx->trid) != 0) { 474 continue; 475 } 476 TAILQ_REMOVE(&g_ut_init_ctrlrs, ctrlr, tailq); 477 nvme_ctrlr_poll_internal(ctrlr, probe_ctx); 478 } 479 480 free(probe_ctx); 481 482 return 0; 483 } 484 485 struct spdk_nvme_probe_ctx * 486 spdk_nvme_connect_async(const struct spdk_nvme_transport_id *trid, 487 const struct spdk_nvme_ctrlr_opts *opts, 488 spdk_nvme_attach_cb attach_cb) 489 { 490 struct spdk_nvme_probe_ctx *probe_ctx; 491 492 if (trid == NULL) { 493 
return NULL; 494 } 495 496 probe_ctx = calloc(1, sizeof(*probe_ctx)); 497 if (probe_ctx == NULL) { 498 return NULL; 499 } 500 501 probe_ctx->trid = *trid; 502 probe_ctx->cb_ctx = (void *)opts; 503 probe_ctx->attach_cb = attach_cb; 504 505 return probe_ctx; 506 } 507 508 int 509 spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr) 510 { 511 if (ctrlr->attached) { 512 ut_detach_ctrlr(ctrlr); 513 } 514 515 return 0; 516 } 517 518 const struct spdk_nvme_ctrlr_data * 519 spdk_nvme_ctrlr_get_data(struct spdk_nvme_ctrlr *ctrlr) 520 { 521 return &ctrlr->cdata; 522 } 523 524 uint32_t 525 spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr) 526 { 527 return ctrlr->num_ns; 528 } 529 530 struct spdk_nvme_ns * 531 spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 532 { 533 if (nsid < 1 || nsid > ctrlr->num_ns) { 534 return NULL; 535 } 536 537 return &ctrlr->ns[nsid - 1]; 538 } 539 540 bool 541 spdk_nvme_ctrlr_is_active_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid) 542 { 543 if (nsid < 1 || nsid > ctrlr->num_ns) { 544 return false; 545 } 546 547 return ctrlr->ns[nsid - 1].is_active; 548 } 549 550 union spdk_nvme_csts_register 551 spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr) 552 { 553 union spdk_nvme_csts_register csts; 554 555 csts.raw = 0; 556 557 return csts; 558 } 559 560 union spdk_nvme_vs_register 561 spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr) 562 { 563 union spdk_nvme_vs_register vs; 564 565 vs.raw = 0; 566 567 return vs; 568 } 569 570 struct spdk_nvme_qpair * 571 spdk_nvme_ctrlr_alloc_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 572 const struct spdk_nvme_io_qpair_opts *user_opts, 573 size_t opts_size) 574 { 575 struct spdk_nvme_qpair *qpair; 576 577 qpair = calloc(1, sizeof(*qpair)); 578 if (qpair == NULL) { 579 return NULL; 580 } 581 582 qpair->ctrlr = ctrlr; 583 TAILQ_INIT(&qpair->outstanding_reqs); 584 TAILQ_INSERT_TAIL(&ctrlr->active_io_qpairs, qpair, tailq); 585 586 return qpair; 587 } 588 589 int 590 
spdk_nvme_ctrlr_connect_io_qpair(struct spdk_nvme_ctrlr *ctrlr, 591 struct spdk_nvme_qpair *qpair) 592 { 593 if (qpair->is_connected) { 594 return -EISCONN; 595 } 596 597 qpair->is_connected = true; 598 599 return 0; 600 } 601 602 int 603 spdk_nvme_ctrlr_reconnect_io_qpair(struct spdk_nvme_qpair *qpair) 604 { 605 struct spdk_nvme_ctrlr *ctrlr; 606 607 ctrlr = qpair->ctrlr; 608 609 if (ctrlr->is_failed) { 610 return -ENXIO; 611 } 612 qpair->is_connected = true; 613 614 return 0; 615 } 616 617 void 618 spdk_nvme_ctrlr_disconnect_io_qpair(struct spdk_nvme_qpair *qpair) 619 { 620 qpair->is_connected = false; 621 } 622 623 int 624 spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair) 625 { 626 SPDK_CU_ASSERT_FATAL(qpair->ctrlr != NULL); 627 628 qpair->is_connected = false; 629 630 if (qpair->poll_group != NULL) { 631 spdk_nvme_poll_group_remove(qpair->poll_group, qpair); 632 } 633 634 TAILQ_REMOVE(&qpair->ctrlr->active_io_qpairs, qpair, tailq); 635 636 CU_ASSERT(qpair->num_outstanding_reqs == 0); 637 638 free(qpair); 639 640 return 0; 641 } 642 643 int 644 spdk_nvme_ctrlr_reset(struct spdk_nvme_ctrlr *ctrlr) 645 { 646 if (ctrlr->fail_reset) { 647 return -EIO; 648 } 649 650 ctrlr->is_failed = false; 651 652 return 0; 653 } 654 655 void 656 spdk_nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr) 657 { 658 ctrlr->is_failed = true; 659 } 660 661 int 662 spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr, 663 struct spdk_nvme_cmd *cmd, void *buf, uint32_t len, 664 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 665 { 666 return ut_submit_nvme_request(NULL, &ctrlr->adminq, cmd->opc, cb_fn, cb_arg); 667 } 668 669 int 670 spdk_nvme_ctrlr_cmd_abort_ext(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair, 671 void *cmd_cb_arg, 672 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 673 { 674 struct ut_nvme_req *req = NULL, *abort_req; 675 676 if (qpair == NULL) { 677 qpair = &ctrlr->adminq; 678 } 679 680 abort_req = calloc(1, sizeof(*abort_req)); 681 if (abort_req == NULL) { 682 
return -ENOMEM; 683 } 684 685 TAILQ_FOREACH(req, &qpair->outstanding_reqs, tailq) { 686 if (req->cb_arg == cmd_cb_arg) { 687 break; 688 } 689 } 690 691 if (req == NULL) { 692 free(abort_req); 693 return -ENOENT; 694 } 695 696 req->cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST; 697 req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 698 699 abort_req->opc = SPDK_NVME_OPC_ABORT; 700 abort_req->cb_fn = cb_fn; 701 abort_req->cb_arg = cb_arg; 702 703 abort_req->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 704 abort_req->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 705 abort_req->cpl.cdw0 = 0; 706 707 TAILQ_INSERT_TAIL(&ctrlr->adminq.outstanding_reqs, abort_req, tailq); 708 ctrlr->adminq.num_outstanding_reqs++; 709 710 return 0; 711 } 712 713 int32_t 714 spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr) 715 { 716 return spdk_nvme_qpair_process_completions(&ctrlr->adminq, 0); 717 } 718 719 uint32_t 720 spdk_nvme_ns_get_id(struct spdk_nvme_ns *ns) 721 { 722 return ns->id; 723 } 724 725 struct spdk_nvme_ctrlr * 726 spdk_nvme_ns_get_ctrlr(struct spdk_nvme_ns *ns) 727 { 728 return ns->ctrlr; 729 } 730 731 static inline struct spdk_nvme_ns_data * 732 _nvme_ns_get_data(struct spdk_nvme_ns *ns) 733 { 734 return &ns->ctrlr->nsdata[ns->id - 1]; 735 } 736 737 const struct spdk_nvme_ns_data * 738 spdk_nvme_ns_get_data(struct spdk_nvme_ns *ns) 739 { 740 return _nvme_ns_get_data(ns); 741 } 742 743 uint64_t 744 spdk_nvme_ns_get_num_sectors(struct spdk_nvme_ns *ns) 745 { 746 return _nvme_ns_get_data(ns)->nsze; 747 } 748 749 const struct spdk_uuid * 750 spdk_nvme_ns_get_uuid(const struct spdk_nvme_ns *ns) 751 { 752 return &ns->uuid; 753 } 754 755 int 756 spdk_nvme_ns_cmd_read_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, void *buffer, 757 void *metadata, uint64_t lba, uint32_t lba_count, 758 spdk_nvme_cmd_cb cb_fn, void *cb_arg, 759 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 760 { 761 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, 
cb_fn, cb_arg); 762 } 763 764 int 765 spdk_nvme_ns_cmd_write_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 766 void *buffer, void *metadata, uint64_t lba, 767 uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, 768 uint32_t io_flags, uint16_t apptag_mask, uint16_t apptag) 769 { 770 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 771 } 772 773 int 774 spdk_nvme_ns_cmd_readv_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 775 uint64_t lba, uint32_t lba_count, 776 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 777 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 778 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 779 uint16_t apptag_mask, uint16_t apptag) 780 { 781 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_READ, cb_fn, cb_arg); 782 } 783 784 int 785 spdk_nvme_ns_cmd_writev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 786 uint64_t lba, uint32_t lba_count, 787 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 788 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 789 spdk_nvme_req_next_sge_cb next_sge_fn, void *metadata, 790 uint16_t apptag_mask, uint16_t apptag) 791 { 792 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_WRITE, cb_fn, cb_arg); 793 } 794 795 int 796 spdk_nvme_ns_cmd_comparev_with_md(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 797 uint64_t lba, uint32_t lba_count, 798 spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags, 799 spdk_nvme_req_reset_sgl_cb reset_sgl_fn, 800 spdk_nvme_req_next_sge_cb next_sge_fn, 801 void *metadata, uint16_t apptag_mask, uint16_t apptag) 802 { 803 return ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_COMPARE, cb_fn, cb_arg); 804 } 805 806 int 807 spdk_nvme_ns_cmd_dataset_management(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair, 808 uint32_t type, const struct spdk_nvme_dsm_range *ranges, uint16_t num_ranges, 809 spdk_nvme_cmd_cb cb_fn, void *cb_arg) 810 { 811 return 
ut_submit_nvme_request(ns, qpair, SPDK_NVME_OPC_DATASET_MANAGEMENT, cb_fn, cb_arg); 812 } 813 814 struct spdk_nvme_poll_group * 815 spdk_nvme_poll_group_create(void *ctx, struct spdk_nvme_accel_fn_table *table) 816 { 817 struct spdk_nvme_poll_group *group; 818 819 group = calloc(1, sizeof(*group)); 820 if (group == NULL) { 821 return NULL; 822 } 823 824 group->ctx = ctx; 825 if (table != NULL) { 826 group->accel_fn_table = *table; 827 } 828 TAILQ_INIT(&group->qpairs); 829 830 return group; 831 } 832 833 int 834 spdk_nvme_poll_group_destroy(struct spdk_nvme_poll_group *group) 835 { 836 if (!TAILQ_EMPTY(&group->qpairs)) { 837 return -EBUSY; 838 } 839 840 free(group); 841 842 return 0; 843 } 844 845 int32_t 846 spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, 847 uint32_t max_completions) 848 { 849 struct ut_nvme_req *req, *tmp; 850 uint32_t num_completions = 0; 851 852 TAILQ_FOREACH_SAFE(req, &qpair->outstanding_reqs, tailq, tmp) { 853 TAILQ_REMOVE(&qpair->outstanding_reqs, req, tailq); 854 qpair->num_outstanding_reqs--; 855 856 req->cb_fn(req->cb_arg, &req->cpl); 857 858 free(req); 859 num_completions++; 860 } 861 862 return num_completions; 863 } 864 865 int64_t 866 spdk_nvme_poll_group_process_completions(struct spdk_nvme_poll_group *group, 867 uint32_t completions_per_qpair, 868 spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb) 869 { 870 struct spdk_nvme_qpair *qpair, *tmp_qpair; 871 int64_t local_completions = 0, error_reason = 0, num_completions = 0; 872 873 SPDK_CU_ASSERT_FATAL(completions_per_qpair == 0); 874 875 if (disconnected_qpair_cb == NULL) { 876 return -EINVAL; 877 } 878 879 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) { 880 if (qpair->is_connected) { 881 local_completions = spdk_nvme_qpair_process_completions(qpair, 882 completions_per_qpair); 883 if (local_completions < 0 && error_reason == 0) { 884 error_reason = local_completions; 885 } else { 886 num_completions += local_completions; 887 
assert(num_completions >= 0); 888 } 889 } 890 } 891 892 TAILQ_FOREACH_SAFE(qpair, &group->qpairs, poll_group_tailq, tmp_qpair) { 893 if (!qpair->is_connected) { 894 disconnected_qpair_cb(qpair, group->ctx); 895 } 896 } 897 898 return error_reason ? error_reason : num_completions; 899 } 900 901 int 902 spdk_nvme_poll_group_add(struct spdk_nvme_poll_group *group, 903 struct spdk_nvme_qpair *qpair) 904 { 905 CU_ASSERT(!qpair->is_connected); 906 907 qpair->poll_group = group; 908 TAILQ_INSERT_TAIL(&group->qpairs, qpair, poll_group_tailq); 909 910 return 0; 911 } 912 913 int 914 spdk_nvme_poll_group_remove(struct spdk_nvme_poll_group *group, 915 struct spdk_nvme_qpair *qpair) 916 { 917 CU_ASSERT(!qpair->is_connected); 918 919 TAILQ_REMOVE(&group->qpairs, qpair, poll_group_tailq); 920 921 return 0; 922 } 923 924 int 925 spdk_bdev_register(struct spdk_bdev *bdev) 926 { 927 return g_ut_register_bdev_status; 928 } 929 930 void 931 spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg) 932 { 933 int rc; 934 935 rc = bdev->fn_table->destruct(bdev->ctxt); 936 if (rc <= 0 && cb_fn != NULL) { 937 cb_fn(cb_arg, rc); 938 } 939 } 940 941 int 942 spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size) 943 { 944 bdev->blockcnt = size; 945 946 return 0; 947 } 948 949 struct spdk_io_channel * 950 spdk_bdev_io_get_io_channel(struct spdk_bdev_io *bdev_io) 951 { 952 return (struct spdk_io_channel *)bdev_io->internal.ch; 953 } 954 955 void 956 spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status) 957 { 958 bdev_io->internal.status = status; 959 bdev_io->internal.in_submit_request = false; 960 } 961 962 void 963 spdk_bdev_io_complete_nvme_status(struct spdk_bdev_io *bdev_io, uint32_t cdw0, int sct, int sc) 964 { 965 if (sct == SPDK_NVME_SCT_GENERIC && sc == SPDK_NVME_SC_SUCCESS) { 966 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS; 967 } else if (sct == SPDK_NVME_SCT_GENERIC && sc == 
SPDK_NVME_SC_ABORTED_BY_REQUEST) { 968 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_ABORTED; 969 } else { 970 bdev_io->internal.status = SPDK_BDEV_IO_STATUS_NVME_ERROR; 971 } 972 973 bdev_io->internal.error.nvme.cdw0 = cdw0; 974 bdev_io->internal.error.nvme.sct = sct; 975 bdev_io->internal.error.nvme.sc = sc; 976 977 spdk_bdev_io_complete(bdev_io, bdev_io->internal.status); 978 } 979 980 void 981 spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len) 982 { 983 struct spdk_io_channel *ch = spdk_bdev_io_get_io_channel(bdev_io); 984 985 ut_bdev_io_set_buf(bdev_io); 986 987 cb(ch, bdev_io, true); 988 } 989 990 static void 991 test_create_ctrlr(void) 992 { 993 struct spdk_nvme_transport_id trid = {}; 994 struct spdk_nvme_ctrlr ctrlr = {}; 995 int rc; 996 997 ut_init_trid(&trid); 998 999 nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 1000 1001 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL); 1002 1003 rc = bdev_nvme_delete("nvme0", NULL); 1004 CU_ASSERT(rc == 0); 1005 1006 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") != NULL); 1007 1008 poll_threads(); 1009 1010 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 1011 } 1012 1013 static void 1014 test_reset_ctrlr(void) 1015 { 1016 struct spdk_nvme_transport_id trid = {}; 1017 struct spdk_nvme_ctrlr ctrlr = {}; 1018 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; 1019 struct nvme_bdev_ctrlr_trid *curr_trid; 1020 struct spdk_io_channel *ch1, *ch2; 1021 struct nvme_io_path *io_path1, *io_path2; 1022 int rc; 1023 1024 ut_init_trid(&trid); 1025 TAILQ_INIT(&ctrlr.active_io_qpairs); 1026 1027 set_thread(0); 1028 1029 nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 1030 1031 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 1032 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 1033 1034 curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids); 1035 SPDK_CU_ASSERT_FATAL(curr_trid != NULL); 1036 1037 ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); 1038 
SPDK_CU_ASSERT_FATAL(ch1 != NULL); 1039 1040 io_path1 = spdk_io_channel_get_ctx(ch1); 1041 CU_ASSERT(io_path1->qpair != NULL); 1042 1043 set_thread(1); 1044 1045 ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); 1046 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 1047 1048 io_path2 = spdk_io_channel_get_ctx(ch2); 1049 CU_ASSERT(io_path2->qpair != NULL); 1050 1051 /* Reset starts from thread 1. */ 1052 set_thread(1); 1053 1054 /* Case 1: ctrlr is already being destructed. */ 1055 nvme_bdev_ctrlr->destruct = true; 1056 1057 rc = _bdev_nvme_reset(nvme_bdev_ctrlr); 1058 CU_ASSERT(rc == -EBUSY); 1059 1060 /* Case 2: reset is in progress. */ 1061 nvme_bdev_ctrlr->destruct = false; 1062 nvme_bdev_ctrlr->resetting = true; 1063 1064 rc = _bdev_nvme_reset(nvme_bdev_ctrlr); 1065 CU_ASSERT(rc == -EAGAIN); 1066 1067 /* Case 3: reset completes successfully. */ 1068 nvme_bdev_ctrlr->resetting = false; 1069 curr_trid->is_failed = true; 1070 ctrlr.is_failed = true; 1071 1072 rc = _bdev_nvme_reset(nvme_bdev_ctrlr); 1073 CU_ASSERT(rc == 0); 1074 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1075 CU_ASSERT(io_path1->qpair != NULL); 1076 CU_ASSERT(io_path2->qpair != NULL); 1077 1078 poll_thread_times(0, 1); 1079 CU_ASSERT(io_path1->qpair == NULL); 1080 CU_ASSERT(io_path2->qpair != NULL); 1081 1082 poll_thread_times(1, 1); 1083 CU_ASSERT(io_path1->qpair == NULL); 1084 CU_ASSERT(io_path2->qpair == NULL); 1085 CU_ASSERT(ctrlr.is_failed == true); 1086 1087 poll_thread_times(1, 1); 1088 CU_ASSERT(ctrlr.is_failed == false); 1089 1090 poll_thread_times(0, 1); 1091 CU_ASSERT(io_path1->qpair != NULL); 1092 CU_ASSERT(io_path2->qpair == NULL); 1093 1094 poll_thread_times(1, 1); 1095 CU_ASSERT(io_path1->qpair != NULL); 1096 CU_ASSERT(io_path2->qpair != NULL); 1097 CU_ASSERT(nvme_bdev_ctrlr->resetting == true); 1098 CU_ASSERT(curr_trid->is_failed == true); 1099 1100 poll_thread_times(1, 1); 1101 CU_ASSERT(nvme_bdev_ctrlr->resetting == false); 1102 CU_ASSERT(curr_trid->is_failed == false); 1103 1104 
	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Verify that deleting a ctrlr while a reset is in flight defers the
 * destruct until the reset completes and all channels are released.
 */
static void
test_race_between_reset_and_destruct_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Reset starts from thread 1. */
	set_thread(1);

	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);

	/* Try destructing ctrlr while ctrlr is being reset, but it will be deferred. */
	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->destruct == true);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);

	poll_threads();

	/* Reset completed, but the ctrlr is not destructed yet. */
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->destruct == true);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);

	/* New reset request is rejected.
	 */
	rc = _bdev_nvme_reset(nvme_bdev_ctrlr);
	CU_ASSERT(rc == -EBUSY);

	/* Additional polling calls spdk_io_device_unregister() for the ctrlr.
	 * However, there are still two channels, so destruct is not completed yet.
	 */
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	/* Releasing the last channel allows the deferred destruct to finish. */
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Exercise bdev_nvme_failover() for a ctrlr with a single trid and then
 * with a secondary trid, covering the busy/error cases and the path switch.
 */
static void
test_failover_ctrlr(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct nvme_bdev_ctrlr_trid *curr_trid, *next_trid;
	struct spdk_io_channel *ch1, *ch2;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	set_thread(0);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* First, test one trid case. */
	curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 1: ctrlr is already being destructed. */
	nvme_bdev_ctrlr->destruct = true;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 2: reset is in progress.
	 */
	nvme_bdev_ctrlr->destruct = false;
	nvme_bdev_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 3: failover is in progress. */
	nvme_bdev_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == 0);
	CU_ASSERT(curr_trid->is_failed == false);

	/* Case 4: reset completes successfully. */
	nvme_bdev_ctrlr->resetting = false;
	nvme_bdev_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(curr_trid->is_failed == true);

	poll_threads();

	curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);

	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
	CU_ASSERT(curr_trid->is_failed == false);

	set_thread(0);

	/* Second, test two trids case. */
	bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2, NULL);

	curr_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(curr_trid != NULL);
	CU_ASSERT(&curr_trid->trid == nvme_bdev_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&curr_trid->trid, &trid1) == 0);

	/* Failover starts from thread 1. */
	set_thread(1);

	/* Case 5: reset is in progress. */
	nvme_bdev_ctrlr->resetting = true;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == -EAGAIN);

	/* Case 6: failover is in progress. */
	nvme_bdev_ctrlr->failover_in_progress = true;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == 0);

	/* Case 7: failover completes successfully.
	 */
	nvme_bdev_ctrlr->resetting = false;
	nvme_bdev_ctrlr->failover_in_progress = false;

	rc = bdev_nvme_failover(nvme_bdev_ctrlr, false);
	CU_ASSERT(rc == 0);

	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == true);

	/* The head of the trid list must have switched to trid2. */
	next_trid = TAILQ_FIRST(&nvme_bdev_ctrlr->trids);
	SPDK_CU_ASSERT_FATAL(next_trid != NULL);
	CU_ASSERT(next_trid != curr_trid);
	CU_ASSERT(&next_trid->trid == nvme_bdev_ctrlr->connected_trid);
	CU_ASSERT(spdk_nvme_transport_id_compare(&next_trid->trid, &trid2) == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
	CU_ASSERT(nvme_bdev_ctrlr->failover_in_progress == false);

	spdk_put_io_channel(ch2);

	set_thread(0);

	spdk_put_io_channel(ch1);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Attach completion callback shared by the tests below; checks the outcome
 * against the expected globals set by each test before bdev_nvme_create().
 */
static void
attach_ctrlr_done(void *cb_ctx, size_t bdev_count, int rc)
{
	CU_ASSERT(rc == g_ut_attach_ctrlr_status);
	CU_ASSERT(bdev_count == g_ut_attach_bdev_count);
}

/* Verify that a reset submitted while another reset is in progress is queued
 * on the channel and executed (or failed) after the first one finishes.
 */
static void
test_pending_reset(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct spdk_bdev_io *first_bdev_io, *second_bdev_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_io_path *io_path1, *io_path2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Pre-set FAILED so a later SUCCESS proves the reset path ran. */
	first_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(first_bdev_io != NULL);
	first_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	second_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(second_bdev_io != NULL);
	second_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;

	set_thread(0);

	ctrlr = ut_attach_ctrlr(&trid, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	ch1 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	io_path1 = spdk_io_channel_get_ctx(ch1);

	set_thread(1);

	ch2 = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	io_path2 = spdk_io_channel_get_ctx(ch2);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 */
	rc = bdev_nvme_reset(io_path2, first_bdev_io);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets));

	set_thread(0);

	/* The second reset is queued on its channel while the first is active. */
	rc = bdev_nvme_reset(io_path1, second_bdev_io);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);

	/* The first reset request is submitted on thread 1, and the second reset request
	 * is submitted on thread 0 while processing the first request.
	 *
	 * The difference from the above scenario is that the controller is removed while
	 * processing the first request. Hence both reset requests should fail.
	 */
	set_thread(1);

	rc = bdev_nvme_reset(io_path2, first_bdev_io);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	CU_ASSERT(TAILQ_EMPTY(&io_path2->pending_resets));

	set_thread(0);

	rc = bdev_nvme_reset(io_path1, second_bdev_io);
	CU_ASSERT(rc == 0);
	CU_ASSERT(TAILQ_FIRST(&io_path1->pending_resets) == second_bdev_io);

	/* Make the in-flight reset fail; the pending one must fail as well. */
	ctrlr->fail_reset = true;

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);
	CU_ASSERT(first_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(second_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

	set_thread(0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	free(first_bdev_io);
	free(second_bdev_io);
}

/* Attach ctrlrs with varying namespace counts and failure injections and
 * verify which nvme_bdev_ctrlr/nvme_bdev objects get created.
 */
static void
test_attach_ctrlr(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *nbdev;
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* If ctrlr fails, no nvme_bdev_ctrlr is created. Failed ctrlr is removed
	 * by probe polling.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	ctrlr->is_failed = true;
	g_ut_attach_ctrlr_status = -EIO;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has no namespace, one nvme_bdev_ctrlr with no namespace is created */
	ctrlr = ut_attach_ctrlr(&trid, 0);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
	CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->num_ns == 0);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* If ctrlr has one namespace, one nvme_bdev_ctrlr with one namespace and
	 * one nvme_bdev is created.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
	CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1);

	/* The attached bdev name is reported back through attached_names. */
	CU_ASSERT(attached_names[0] != NULL && strcmp(attached_names[0], "nvme0n1") == 0);
	attached_names[0] = NULL;

	nbdev = nvme_bdev_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(nbdev != NULL);
	CU_ASSERT(bdev_nvme_get_ctrlr(&nbdev->disk) == ctrlr);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	/* Ctrlr has one namespace but one nvme_bdev_ctrlr with no namespace is
	 * created because creating one nvme_bdev failed.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* Force spdk_bdev_register() to fail for the namespace's bdev. */
	g_ut_register_bdev_status = -EINVAL;
	g_ut_attach_bdev_count = 0;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);
	CU_ASSERT(nvme_bdev_ctrlr->ctrlr == ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->num_ns == 1);

	CU_ASSERT(attached_names[0] == NULL);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	g_ut_register_bdev_status = 0;
}

/* Verify that the poll group poller reconnects a disconnected qpair, unless
 * the ctrlr itself is failed.
 */
static void
test_reconnect_qpair(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct spdk_io_channel *ch;
	struct nvme_io_path *io_path;
	int rc;

	set_thread(0);

	ut_init_trid(&trid);
	TAILQ_INIT(&ctrlr.active_io_qpairs);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	ch = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	io_path = spdk_io_channel_get_ctx(ch);
	CU_ASSERT(io_path->qpair != NULL);
	CU_ASSERT(io_path->group != NULL);
	CU_ASSERT(io_path->group->group != NULL);
	CU_ASSERT(io_path->group->poller != NULL);

	/* Test if the disconnected qpair is reconnected. */
	io_path->qpair->is_connected = false;

	poll_threads();

	CU_ASSERT(io_path->qpair->is_connected == true);

	/* If the ctrlr is failed, reconnecting qpair should fail too.
	 */
	io_path->qpair->is_connected = false;
	ctrlr.is_failed = true;

	poll_threads();

	CU_ASSERT(io_path->qpair->is_connected == false);

	spdk_put_io_channel(ch);

	poll_threads();

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Verify that aer_cb() repopulates namespaces and resizes bdevs when a
 * namespace-attribute-changed async event is received.
 */
static void
test_aer_cb(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	struct nvme_bdev *bdev;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	union spdk_nvme_async_event_completion event = {};
	struct spdk_nvme_cpl cpl = {};
	int rc;

	set_thread(0);

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a ctrlr, whose max number of namespaces is 4, and 2nd, 3rd, and 4th
	 * namespaces are populated.
	 */
	ctrlr = ut_attach_ctrlr(&trid, 4);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	/* The 1st namespace is initially inactive. */
	ctrlr->ns[0].is_active = false;

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 3;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	CU_ASSERT(nvme_bdev_ctrlr->num_ns == 4);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == false);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == true);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true);

	bdev = nvme_bdev_ctrlr->namespaces[3]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);
	CU_ASSERT(bdev->disk.blockcnt == 1024);

	/* Dynamically populate 1st namespace and depopulate 3rd namespace, and
	 * change the size of the 4th namespace.
	 */
	ctrlr->ns[0].is_active = true;
	ctrlr->ns[2].is_active = false;
	ctrlr->nsdata[3].nsze = 2048;

	/* Build a namespace-attribute-changed AER completion and deliver it. */
	event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_NOTICE;
	event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_NS_ATTR_CHANGED;
	cpl.cdw0 = event.raw;

	aer_cb(nvme_bdev_ctrlr, &cpl);

	CU_ASSERT(nvme_bdev_ctrlr->namespaces[0]->populated == true);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[1]->populated == true);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[2]->populated == false);
	CU_ASSERT(nvme_bdev_ctrlr->namespaces[3]->populated == true);
	CU_ASSERT(bdev->disk.blockcnt == 2048);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Helper: submit one I/O of the given type and verify it goes out on the
 * qpair and completes successfully after polling.
 */
static void
ut_test_submit_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			enum spdk_bdev_io_type io_type)
{
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 1);

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

/* Helper: submit an I/O type that completes inline (no request is queued on
 * the qpair and no polling is needed).
 */
static void
ut_test_submit_nop(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
		   enum spdk_bdev_io_type io_type)
{
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair));

	bdev_io->type = io_type;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

/* Helper: submit a compare-and-write and verify both fused commands (compare
 * first, then write) are outstanding and complete together.
 */
static void
ut_test_submit_fused_nvme_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct nvme_io_path *io_path = spdk_io_channel_get_ctx(ch);
	struct nvme_bdev_io *bio = (struct nvme_bdev_io *)bdev_io->driver_ctx;
	struct ut_nvme_req *req;
	struct nvme_bdev *nbdev = (struct nvme_bdev *)bdev_io->bdev->ctxt;
	struct spdk_nvme_ns *ns = NULL;
	struct spdk_nvme_qpair *qpair = NULL;

	CU_ASSERT(bdev_nvme_find_io_path(nbdev, io_path, &ns, &qpair));

	/* Only compare and write now. */
	bdev_io->type = SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE;
	bdev_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(qpair->num_outstanding_reqs == 2);
	CU_ASSERT(bio->first_fused_submitted == true);

	/* First outstanding request is compare operation.
	 */
	req = TAILQ_FIRST(&io_path->qpair->outstanding_reqs);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->opc == SPDK_NVME_OPC_COMPARE);
	req->cpl.cdw0 = SPDK_NVME_OPC_COMPARE;

	poll_threads();

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
	CU_ASSERT(bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(qpair->num_outstanding_reqs == 0);
}

/* Helper: submit an admin passthru command and verify it completes on the
 * admin queue (polled on thread 1) before the I/O completion is delivered
 * back on the submitting thread (thread 0).
 */
static void
ut_test_submit_admin_cmd(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io,
			 struct spdk_nvme_ctrlr *ctrlr)
{
	bdev_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN;
	bdev_io->internal.in_submit_request = true;
	bdev_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES;

	bdev_nvme_submit_request(ch, bdev_io);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	spdk_delay_us(10000);
	poll_thread_times(1, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	poll_thread_times(0, 1);

	CU_ASSERT(bdev_io->internal.in_submit_request == false);
}

/* Drive bdev_nvme_submit_request() through every supported I/O type using
 * the ut_test_submit_* helpers above.
 */
static void
test_submit_nvme_cmd(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	set_thread(1);

	ctrlr = ut_attach_ctrlr(&trid, 1);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 1;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	bdev = nvme_bdev_ctrlr->namespaces[0]->bdev;
	SPDK_CU_ASSERT_FATAL(bdev != NULL);

	set_thread(0);

	ch = spdk_get_io_channel(nvme_bdev_ctrlr);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	bdev_io->bdev = &bdev->disk;
	bdev_io->internal.ch = (struct spdk_bdev_channel *)ch;

	/* First read goes through the get-buf path (no iovs yet). */
	bdev_io->u.bdev.iovs = NULL;

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);

	ut_bdev_io_set_buf(bdev_io);

	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_READ);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_WRITE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_COMPARE);
	ut_test_submit_nvme_cmd(ch, bdev_io, SPDK_BDEV_IO_TYPE_UNMAP);

	ut_test_submit_nop(ch, bdev_io, SPDK_BDEV_IO_TYPE_FLUSH);

	ut_test_submit_fused_nvme_cmd(ch, bdev_io);

	ut_test_submit_admin_cmd(ch, bdev_io, ctrlr);

	free(bdev_io);

	spdk_put_io_channel(ch);

	poll_threads();

	set_thread(1);

	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Verify bdev_nvme_delete() with a specific trid: unknown trids are rejected,
 * removing an unused trid keeps the ctrlr, and removing the active trid
 * fails over to the remaining path.
 */
static void
test_remove_trid(void)
{
	struct spdk_nvme_transport_id trid1 = {}, trid2 = {}, trid3 = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL;
	struct nvme_bdev_ctrlr_trid *ctrid;
	int rc;

	ut_init_trid(&trid1);
	ut_init_trid2(&trid2);
	ut_init_trid3(&trid3);

	set_thread(0);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2, NULL);

	/* trid3 is not in the registered list. */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == -ENXIO);

	/* trid2 is not used, and simply removed. */
	rc = bdev_nvme_delete("nvme0", &trid2);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
	TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid2) != 0);
	}

	bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid3, NULL);

	/* trid1 is currently used and trid3 is an alternative path.
	 * If we remove trid1, path is changed to trid3.
	 */
	rc = bdev_nvme_delete("nvme0", &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);
	CU_ASSERT(nvme_bdev_ctrlr->resetting == true);
	TAILQ_FOREACH(ctrid, &nvme_bdev_ctrlr->trids, link) {
		CU_ASSERT(spdk_nvme_transport_id_compare(&ctrid->trid, &trid1) != 0);
	}
	CU_ASSERT(spdk_nvme_transport_id_compare(nvme_bdev_ctrlr->connected_trid, &trid3) == 0);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr->resetting == false);

	/* trid3 is the current and only path. If we remove trid3, the corresponding
	 * nvme_bdev_ctrlr is removed.
	 */
	rc = bdev_nvme_delete("nvme0", &trid3);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);

	nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid1, 0, NULL);

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	bdev_nvme_add_secondary_trid(nvme_bdev_ctrlr, &ctrlr, &trid2, NULL);

	/* If trid is not specified, nvme_bdev_ctrlr itself is removed. */
	rc = bdev_nvme_delete("nvme0", NULL);
	CU_ASSERT(rc == 0);
	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == nvme_bdev_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Verify aborting completed, outstanding I/O, and outstanding admin requests,
 * with the abort submitted from the same or a different thread than the
 * request being aborted.
 */
static void
test_abort(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev;
	struct spdk_bdev_io *write_io, *admin_io, *abort_io;
	struct spdk_io_channel *ch1, *ch2;
	struct nvme_io_path *io_path1;
	int rc;

	/* Create ctrlr on thread 1 and submit I/O and admin requests to be aborted on
	 * thread 0. Aborting I/O requests are submitted on thread 0. Aborting admin requests
	 * are submitted on thread 1. Both should succeed.
2007 */ 2008 2009 ut_init_trid(&trid); 2010 2011 ctrlr = ut_attach_ctrlr(&trid, 1); 2012 SPDK_CU_ASSERT_FATAL(ctrlr != NULL); 2013 2014 g_ut_attach_ctrlr_status = 0; 2015 g_ut_attach_bdev_count = 1; 2016 2017 set_thread(1); 2018 2019 rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0, 2020 attach_ctrlr_done, NULL, NULL); 2021 CU_ASSERT(rc == 0); 2022 2023 spdk_delay_us(1000); 2024 poll_threads(); 2025 2026 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 2027 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 2028 2029 bdev = nvme_bdev_ctrlr->namespaces[0]->bdev; 2030 SPDK_CU_ASSERT_FATAL(bdev != NULL); 2031 2032 write_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 2033 SPDK_CU_ASSERT_FATAL(write_io != NULL); 2034 write_io->bdev = &bdev->disk; 2035 write_io->type = SPDK_BDEV_IO_TYPE_WRITE; 2036 ut_bdev_io_set_buf(write_io); 2037 2038 admin_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 2039 SPDK_CU_ASSERT_FATAL(admin_io != NULL); 2040 admin_io->bdev = &bdev->disk; 2041 admin_io->type = SPDK_BDEV_IO_TYPE_NVME_ADMIN; 2042 admin_io->u.nvme_passthru.cmd.opc = SPDK_NVME_OPC_GET_FEATURES; 2043 2044 abort_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct nvme_bdev_io)); 2045 SPDK_CU_ASSERT_FATAL(abort_io != NULL); 2046 abort_io->bdev = &bdev->disk; 2047 abort_io->type = SPDK_BDEV_IO_TYPE_ABORT; 2048 2049 set_thread(0); 2050 2051 ch1 = spdk_get_io_channel(nvme_bdev_ctrlr); 2052 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 2053 io_path1 = spdk_io_channel_get_ctx(ch1); 2054 2055 set_thread(1); 2056 2057 ch2 = spdk_get_io_channel(nvme_bdev_ctrlr); 2058 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 2059 2060 write_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2061 abort_io->internal.ch = (struct spdk_bdev_channel *)ch1; 2062 2063 /* Aborting the already completed request should fail. 
	 */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);
	poll_threads();

	CU_ASSERT(write_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	poll_threads();

	/* The target I/O had already completed, so the abort fails. */
	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	admin_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;

	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);
	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(admin_io->internal.in_submit_request == false);

	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	poll_threads();

	/* Same for an already completed admin request. */
	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	/* Aborting the write request should succeed.
	 */
	write_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, write_io);

	CU_ASSERT(write_io->internal.in_submit_request == true);
	CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch1;
	abort_io->u.abort.bio_to_abort = write_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch1, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(write_io->internal.in_submit_request == false);
	CU_ASSERT(write_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(io_path1->qpair->num_outstanding_reqs == 0);

	/* Aborting the admin request should succeed. */
	admin_io->internal.in_submit_request = true;
	bdev_nvme_submit_request(ch1, admin_io);

	CU_ASSERT(admin_io->internal.in_submit_request == true);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 1);

	abort_io->internal.ch = (struct spdk_bdev_channel *)ch2;
	abort_io->u.abort.bio_to_abort = admin_io;
	abort_io->internal.in_submit_request = true;

	bdev_nvme_submit_request(ch2, abort_io);

	spdk_delay_us(10000);
	poll_threads();

	CU_ASSERT(abort_io->internal.in_submit_request == false);
	CU_ASSERT(abort_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);
	CU_ASSERT(admin_io->internal.in_submit_request == false);
	CU_ASSERT(admin_io->internal.status == SPDK_BDEV_IO_STATUS_ABORTED);
	CU_ASSERT(ctrlr->adminq.num_outstanding_reqs == 0);

	set_thread(0);

	spdk_put_io_channel(ch1);

	set_thread(1);

	spdk_put_io_channel(ch2);

	poll_threads();

free(write_io); 2159 free(admin_io); 2160 free(abort_io); 2161 2162 set_thread(1); 2163 2164 rc = bdev_nvme_delete("nvme0", NULL); 2165 CU_ASSERT(rc == 0); 2166 2167 poll_threads(); 2168 2169 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 2170 } 2171 2172 static void 2173 test_get_io_qpair(void) 2174 { 2175 struct spdk_nvme_transport_id trid = {}; 2176 struct spdk_nvme_ctrlr ctrlr = {}; 2177 struct nvme_bdev_ctrlr *nvme_bdev_ctrlr = NULL; 2178 struct spdk_io_channel *ch; 2179 struct nvme_io_path *io_path; 2180 struct spdk_nvme_qpair *qpair; 2181 int rc; 2182 2183 ut_init_trid(&trid); 2184 TAILQ_INIT(&ctrlr.active_io_qpairs); 2185 2186 set_thread(0); 2187 2188 nvme_bdev_ctrlr_create(&ctrlr, "nvme0", &trid, 0, NULL); 2189 2190 nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0"); 2191 SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL); 2192 2193 ch = spdk_get_io_channel(nvme_bdev_ctrlr); 2194 SPDK_CU_ASSERT_FATAL(ch != NULL); 2195 io_path = spdk_io_channel_get_ctx(ch); 2196 CU_ASSERT(io_path->qpair != NULL); 2197 2198 qpair = bdev_nvme_get_io_qpair(ch); 2199 CU_ASSERT(qpair == io_path->qpair); 2200 2201 spdk_put_io_channel(ch); 2202 2203 rc = bdev_nvme_delete("nvme0", NULL); 2204 CU_ASSERT(rc == 0); 2205 2206 poll_threads(); 2207 2208 CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL); 2209 } 2210 2211 /* Test a scenario that the bdev subsystem starts shutdown when there still exists 2212 * any NVMe bdev. In this scenario, spdk_bdev_unregister() is called first. Add a 2213 * test case to avoid regression for this scenario. spdk_bdev_unregister() calls 2214 * bdev_nvme_destruct() in the end, and so call bdev_nvme_destruct() directly. 
 */
static void
test_bdev_unregister(void)
{
	struct spdk_nvme_transport_id trid = {};
	struct spdk_nvme_host_id hostid = {};
	struct spdk_nvme_ctrlr *ctrlr;
	struct nvme_bdev_ctrlr *nvme_bdev_ctrlr;
	struct nvme_bdev_ns *nvme_ns1, *nvme_ns2;
	const int STRING_SIZE = 32;
	const char *attached_names[STRING_SIZE];
	struct nvme_bdev *bdev1, *bdev2;
	int rc;

	memset(attached_names, 0, sizeof(char *) * STRING_SIZE);
	ut_init_trid(&trid);

	/* Attach a controller exposing two namespaces so two bdevs get created. */
	ctrlr = ut_attach_ctrlr(&trid, 2);
	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);

	g_ut_attach_ctrlr_status = 0;
	g_ut_attach_bdev_count = 2;

	rc = bdev_nvme_create(&trid, &hostid, "nvme0", attached_names, STRING_SIZE, NULL, 0,
			      attach_ctrlr_done, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_delay_us(1000);
	poll_threads();

	nvme_bdev_ctrlr = nvme_bdev_ctrlr_get_by_name("nvme0");
	SPDK_CU_ASSERT_FATAL(nvme_bdev_ctrlr != NULL);

	nvme_ns1 = nvme_bdev_ctrlr->namespaces[0];
	SPDK_CU_ASSERT_FATAL(nvme_ns1 != NULL);

	bdev1 = nvme_ns1->bdev;
	SPDK_CU_ASSERT_FATAL(bdev1 != NULL);

	nvme_ns2 = nvme_bdev_ctrlr->namespaces[1];
	SPDK_CU_ASSERT_FATAL(nvme_ns2 != NULL);

	bdev2 = nvme_ns2->bdev;
	SPDK_CU_ASSERT_FATAL(bdev2 != NULL);

	/* Mimic spdk_bdev_unregister() by calling the destruct callback
	 * directly, before the controller itself is destructed.
	 */
	bdev_nvme_destruct(&bdev1->disk);
	bdev_nvme_destruct(&bdev2->disk);

	poll_threads();

	/* Both namespaces must have dropped their bdev references. */
	CU_ASSERT(nvme_ns1->bdev == NULL);
	CU_ASSERT(nvme_ns2->bdev == NULL);

	/* Now destruct the controller; it must go away cleanly even though
	 * its bdevs were unregistered first.
	 */
	nvme_bdev_ctrlr->destruct = true;
	_nvme_bdev_ctrlr_destruct(nvme_bdev_ctrlr);

	poll_threads();

	CU_ASSERT(nvme_bdev_ctrlr_get_by_name("nvme0") == NULL);
}

/* Exercise bdev_nvme_compare_ns() over every combination of the three
 * namespace identifiers (EUI64, NGUID, UUID) being absent, mismatched,
 * or matched.
 */
static void
test_compare_ns(void)
{
	struct spdk_nvme_ns_data nsdata1 = {}, nsdata2 = {};
	struct spdk_nvme_ctrlr ctrlr1 = { .nsdata = &nsdata1, }, ctrlr2 = { .nsdata = &nsdata2, };
	struct spdk_nvme_ns ns1 = { .id = 1, .ctrlr = &ctrlr1, },
			   ns2 = { .id = 1, .ctrlr = &ctrlr2, };

	/* No IDs are defined. */
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only EUI64 are defined and not matched. */
	nsdata1.eui64 = 0xABCDEF0123456789;
	nsdata2.eui64 = 0xBBCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only EUI64 are defined and matched. */
	nsdata2.eui64 = 0xABCDEF0123456789;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only NGUID are defined and not matched. */
	nsdata1.eui64 = 0x0;
	nsdata2.eui64 = 0x0;
	nsdata1.nguid[0] = 0x12;
	nsdata2.nguid[0] = 0x10;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only NGUID are defined and matched. */
	nsdata2.nguid[0] = 0x12;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* Only UUID are defined and not matched. */
	nsdata1.nguid[0] = 0x0;
	nsdata2.nguid[0] = 0x0;
	ns1.uuid.u.raw[0] = 0xAA;
	ns2.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == false);

	/* Only UUID are defined and matched. */
	ns1.uuid.u.raw[0] = 0xAB;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);

	/* All EUI64, NGUID, and UUID are defined and matched.
	 */
	nsdata1.eui64 = 0x123456789ABCDEF;
	nsdata2.eui64 = 0x123456789ABCDEF;
	nsdata1.nguid[15] = 0x34;
	nsdata2.nguid[15] = 0x34;
	CU_ASSERT(bdev_nvme_compare_ns(&ns1, &ns2) == true);
}

/* Register the stub accel engine as an io_device so that
 * spdk_accel_engine_get_io_channel() (stubbed above) can hand out channels.
 */
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}

/* Undo init_accel(). */
static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

/* Test entry point: register the suite, bring up the multi-thread test
 * environment and the bdev_nvme module, run all tests, then tear down.
 * Returns the number of CUnit failures.
 */
int
main(int argc, const char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvme", NULL, NULL);

	CU_ADD_TEST(suite, test_create_ctrlr);
	CU_ADD_TEST(suite, test_reset_ctrlr);
	CU_ADD_TEST(suite, test_race_between_reset_and_destruct_ctrlr);
	CU_ADD_TEST(suite, test_failover_ctrlr);
	CU_ADD_TEST(suite, test_pending_reset);
	CU_ADD_TEST(suite, test_attach_ctrlr);
	CU_ADD_TEST(suite, test_reconnect_qpair);
	CU_ADD_TEST(suite, test_aer_cb);
	CU_ADD_TEST(suite, test_submit_nvme_cmd);
	CU_ADD_TEST(suite, test_remove_trid);
	CU_ADD_TEST(suite, test_abort);
	CU_ADD_TEST(suite, test_get_io_qpair);
	CU_ADD_TEST(suite, test_bdev_unregister);
	CU_ADD_TEST(suite, test_compare_ns);

	CU_basic_set_mode(CU_BRM_VERBOSE);

	/* Three ut threads; module init and accel setup run on thread 0. */
	allocate_threads(3);
	set_thread(0);
	bdev_nvme_library_init();
	init_accel();

	CU_basic_run_tests();

	/* Tear down on thread 0 to mirror the setup above. */
	set_thread(0);
	bdev_nvme_library_fini();
	fini_accel();
	free_threads();

	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}