/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "common/lib/ut_multithread.c"
#include "spdk_internal/cunit.h"
#include "spdk/nvmf.h"
#include "spdk_internal/mock.h"

#include "spdk/bdev_module.h"
#include "nvmf/subsystem.c"
#include "nvmf/transport.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_bdev_module_claim_bdev,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);

DEFINE_STUB_V(spdk_bdev_module_release_bdev,
	      (struct spdk_bdev *bdev));

DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
	    (const struct spdk_bdev *bdev), 512);

DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
	    (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
	    (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev,
	     enum spdk_bdev_io_type io_type), false);

DEFINE_STUB_V(nvmf_update_discovery_log,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

DEFINE_STUB(spdk_nvmf_qpair_disconnect,
	    int,
	    (struct spdk_nvmf_qpair *qpair,
	     nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);

DEFINE_STUB(spdk_nvmf_request_complete,
	    int,
	    (struct spdk_nvmf_request *req), 0);

DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice,
	    int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);

DEFINE_STUB(spdk_nvme_transport_id_trtype_str,
	    const char *,
	    (enum spdk_nvme_transport_type trtype), NULL);

DEFINE_STUB(spdk_bdev_is_zoned, bool,
	    (const struct spdk_bdev *bdev), false);

DEFINE_STUB(spdk_bdev_get_max_zone_append_size, uint32_t,
	    (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *,
	    (const char *name), NULL);

DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *,
	    (enum spdk_nvmf_adrfam adrfam), NULL);

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int,
	    (struct spdk_nvmf_qpair *qpair,
	     struct spdk_nvme_transport_id *trid), 0);

static struct spdk_nvmf_transport g_transport = {};

struct spdk_nvmf_subsystem *
spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
{
	return NULL;
}

struct spdk_nvmf_transport *
spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
{
	if (strncmp(transport_name, SPDK_NVME_TRANSPORT_NAME_RDMA, SPDK_NVMF_TRSTRING_MAX_LEN)) {
		return &g_transport;
	}

	return NULL;
}

int
nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem)
{
	return 0;
}

int
nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
			      struct spdk_nvmf_subsystem *subsystem,
			      spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
	return 0;
}

void
nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem,
				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
}

void
nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
				struct spdk_nvmf_subsystem *subsystem,
				uint32_t nsid,
				spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
}

void
nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
				 struct spdk_nvmf_subsystem *subsystem,
				 spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
{
}

int
spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
{
	if (trtype == NULL || str == NULL) {
		return -EINVAL;
	}

	if (strcasecmp(str, "PCIe") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_PCIE;
	} else if (strcasecmp(str, "RDMA") == 0) {
		*trtype = SPDK_NVME_TRANSPORT_RDMA;
	} else {
		return -ENOENT;
	}
	return 0;
}

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

int32_t
spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
{
	return -1;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return -1;
}

int
spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
{
	return -1;
}

void
nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
{
}

static struct spdk_nvmf_ctrlr *g_ns_changed_ctrlr = NULL;
static uint32_t g_ns_changed_nsid = 0;
void
nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
{
	g_ns_changed_ctrlr = ctrlr;
	g_ns_changed_nsid = nsid;
}

static struct spdk_bdev g_bdevs[] = {
	{ .name = "bdev1" },
	{ .name = "bdev2" },
};

struct spdk_bdev_desc {
	struct spdk_bdev *bdev;
};

int
spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
		   void *event_ctx, struct spdk_bdev_desc **_desc)
{
	struct spdk_bdev_desc *desc;
	size_t i;

	/* Iterate over array elements, not over the array's size in bytes. */
	for (i = 0; i < sizeof(g_bdevs) / sizeof(g_bdevs[0]); i++) {
		if (strcmp(bdev_name, g_bdevs[i].name) == 0) {

			desc = calloc(1, sizeof(*desc));
			SPDK_CU_ASSERT_FATAL(desc != NULL);

			desc->bdev = &g_bdevs[i];
			*_desc = desc;
			return 0;
		}
	}

	return -EINVAL;
}

void
spdk_bdev_close(struct spdk_bdev_desc *desc)
{
	free(desc);
}

struct spdk_bdev *
spdk_bdev_desc_get_bdev(struct spdk_bdev_desc *desc)
{
	return desc->bdev;
}

const char *
spdk_bdev_get_name(const struct spdk_bdev *bdev)
{
	return "test";
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

static void
test_spdk_nvmf_subsystem_add_ns(void)
{
	struct spdk_nvmf_tgt tgt = {};
	struct spdk_nvmf_subsystem subsystem = {
		.max_nsid = 1024,
		.ns = NULL,
		.tgt = &tgt,
	};
	struct spdk_nvmf_ns_opts ns_opts;
	uint32_t nsid;
	int rc;

	subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *));
	SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
	subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t));
	SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL);

	tgt.max_subsystems = 1024;
	RB_INIT(&tgt.subsystems);

	/* Request a specific NSID */
	spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
	ns_opts.nsid = 5;
	nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL);
	CU_ASSERT(nsid == 5);
	CU_ASSERT(subsystem.max_nsid == 1024);
	SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
	CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[1]);

	/* Request an NSID that is already in use */
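	/* Note (added for clarity): spdk_nvmf_subsystem_add_ns_ext() returns the
	 * allocated NSID on success and 0 on failure, so the conflicting
	 * requests below are expected to return 0.
	 */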
spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts)); 274 ns_opts.nsid = 5; 275 nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL); 276 CU_ASSERT(nsid == 0); 277 CU_ASSERT(subsystem.max_nsid == 1024); 278 279 /* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */ 280 spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts)); 281 ns_opts.nsid = 0xFFFFFFFF; 282 nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev2", &ns_opts, sizeof(ns_opts), NULL); 283 CU_ASSERT(nsid == 0); 284 CU_ASSERT(subsystem.max_nsid == 1024); 285 286 rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 5); 287 CU_ASSERT(rc == 0); 288 289 free(subsystem.ns); 290 free(subsystem.ana_group); 291 } 292 293 static void 294 nvmf_test_create_subsystem(void) 295 { 296 struct spdk_nvmf_tgt tgt = {}; 297 char nqn[256]; 298 struct spdk_nvmf_subsystem *subsystem; 299 int rc; 300 301 tgt.max_subsystems = 1024; 302 tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems); 303 RB_INIT(&tgt.subsystems); 304 305 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1"); 306 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 307 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 308 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 309 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 310 CU_ASSERT(rc == 0); 311 312 /* valid name with complex reverse domain */ 313 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1"); 314 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 315 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 316 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 317 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 318 CU_ASSERT(rc == 0); 319 320 /* Valid name discovery controller */ 321 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1"); 322 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 323 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 324 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 325 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 326 CU_ASSERT(rc == 0); 327 328 /* Invalid name, no user supplied string */ 329 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:"); 330 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 331 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 332 333 /* Valid name, only contains top-level domain name */ 334 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1"); 335 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 336 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 337 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 338 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 339 CU_ASSERT(rc == 0); 340 341 /* Invalid name, domain label > 63 characters */ 342 snprintf(nqn, sizeof(nqn), 343 "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub"); 344 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 345 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 346 347 /* Invalid name, domain label starts with digit */ 348 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub"); 349 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 350 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 351 352 /* Invalid name, domain label starts with - */ 353 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1"); 354 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 355 
SPDK_CU_ASSERT_FATAL(subsystem == NULL); 356 357 /* Invalid name, domain label ends with - */ 358 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1"); 359 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 360 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 361 362 /* Invalid name, domain label with multiple consecutive periods */ 363 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1"); 364 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 365 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 366 367 /* Longest valid name */ 368 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:"); 369 memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn)); 370 nqn[223] = '\0'; 371 CU_ASSERT(strlen(nqn) == 223); 372 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 373 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 374 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 375 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 376 CU_ASSERT(rc == 0); 377 378 /* Invalid name, too long */ 379 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:"); 380 memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn)); 381 nqn[224] = '\0'; 382 CU_ASSERT(strlen(nqn) == 224); 383 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 384 CU_ASSERT(subsystem == NULL); 385 386 /* Valid name using uuid format */ 387 snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9b6406-0fc8-4779-80ca-4dca14bda0d2"); 388 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 389 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 390 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 391 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 392 CU_ASSERT(rc == 0); 393 394 /* Invalid name user string contains an invalid utf-8 character */ 395 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1"); 396 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 397 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 398 399 /* Valid name with non-ascii but valid utf-8 characters */ 400 snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80"); 401 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 402 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 403 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn); 404 rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 405 CU_ASSERT(rc == 0); 406 407 /* Invalid uuid (too long) */ 408 snprintf(nqn, sizeof(nqn), 409 "nqn.2014-08.org.nvmexpress:uuid:ff9b6406-0fc8-4779-80ca-4dca14bda0d2aaaa"); 410 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 411 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 412 413 /* Invalid uuid (dashes placed incorrectly) */ 414 snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9b64-060fc8-4779-80ca-4dca14bda0d2"); 415 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 416 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 417 418 /* Invalid uuid (invalid characters in uuid) */ 419 snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:ff9hg406-0fc8-4779-80ca-4dca14bda0d2"); 420 subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0); 421 SPDK_CU_ASSERT_FATAL(subsystem == NULL); 422 423 spdk_bit_array_free(&tgt.subsystem_ids); 424 } 425 426 static void 427 test_spdk_nvmf_subsystem_set_sn(void) 428 { 429 struct spdk_nvmf_subsystem subsystem = {}; 430 431 /* Basic valid serial number */ 432 CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, 
"abcd xyz") == 0); 433 CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0); 434 435 /* Exactly 20 characters (valid) */ 436 CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0); 437 CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0); 438 439 /* 21 characters (too long, invalid) */ 440 CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0); 441 442 /* Non-ASCII characters (invalid) */ 443 CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0); 444 } 445 446 /* 447 * Reservation Unit Test Configuration 448 * -------- -------- -------- 449 * | Host A | | Host B | | Host C | 450 * -------- -------- -------- 451 * / \ | | 452 * -------- -------- ------- ------- 453 * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C| 454 * -------- -------- ------- ------- 455 * \ \ / / 456 * \ \ / / 457 * \ \ / / 458 * -------------------------------------- 459 * | NAMESPACE 1 | 460 * -------------------------------------- 461 */ 462 static struct spdk_nvmf_subsystem g_subsystem; 463 static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C; 464 static struct spdk_nvmf_ns g_ns; 465 static struct spdk_bdev g_bdev; 466 struct spdk_nvmf_subsystem_pg_ns_info g_ns_info; 467 468 void 469 nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr) 470 { 471 } 472 473 static void 474 ut_reservation_init(void) 475 { 476 477 TAILQ_INIT(&g_subsystem.ctrlrs); 478 479 memset(&g_ns, 0, sizeof(g_ns)); 480 TAILQ_INIT(&g_ns.registrants); 481 g_ns.subsystem = &g_subsystem; 482 g_ns.ptpl_file = NULL; 483 g_ns.ptpl_activated = false; 484 spdk_uuid_generate(&g_bdev.uuid); 485 g_ns.bdev = &g_bdev; 486 487 /* Host A has two controllers */ 488 spdk_uuid_generate(&g_ctrlr1_A.hostid); 489 TAILQ_INIT(&g_ctrlr1_A.log_head); 490 g_ctrlr1_A.subsys = &g_subsystem; 491 g_ctrlr1_A.num_avail_log_pages = 0; 492 TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr1_A, link); 493 spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid); 494 TAILQ_INIT(&g_ctrlr2_A.log_head); 495 g_ctrlr2_A.subsys = &g_subsystem; 496 g_ctrlr2_A.num_avail_log_pages = 0; 497 TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr2_A, link); 498 499 /* Host B has 1 controller */ 500 spdk_uuid_generate(&g_ctrlr_B.hostid); 501 TAILQ_INIT(&g_ctrlr_B.log_head); 502 g_ctrlr_B.subsys = &g_subsystem; 503 g_ctrlr_B.num_avail_log_pages = 0; 504 TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_B, link); 505 506 /* Host C has 1 controller */ 507 spdk_uuid_generate(&g_ctrlr_C.hostid); 508 TAILQ_INIT(&g_ctrlr_C.log_head); 509 g_ctrlr_C.subsys = &g_subsystem; 510 g_ctrlr_C.num_avail_log_pages = 0; 511 TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_C, link); 512 } 513 514 static void 515 ut_reservation_deinit(void) 516 { 517 struct spdk_nvmf_registrant *reg, *tmp; 518 struct spdk_nvmf_reservation_log *log, *log_tmp; 519 struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp; 520 521 TAILQ_FOREACH_SAFE(reg, &g_ns.registrants, link, tmp) { 522 TAILQ_REMOVE(&g_ns.registrants, reg, link); 523 free(reg); 524 } 525 TAILQ_FOREACH_SAFE(log, &g_ctrlr1_A.log_head, link, log_tmp) { 526 TAILQ_REMOVE(&g_ctrlr1_A.log_head, log, link); 527 free(log); 528 } 529 g_ctrlr1_A.num_avail_log_pages = 0; 530 TAILQ_FOREACH_SAFE(log, &g_ctrlr2_A.log_head, link, log_tmp) { 531 TAILQ_REMOVE(&g_ctrlr2_A.log_head, log, link); 532 free(log); 533 } 534 g_ctrlr2_A.num_avail_log_pages = 0; 535 TAILQ_FOREACH_SAFE(log, &g_ctrlr_B.log_head, link, log_tmp) { 536 TAILQ_REMOVE(&g_ctrlr_B.log_head, log, link); 537 free(log); 538 } 539 
g_ctrlr_B.num_avail_log_pages = 0; 540 TAILQ_FOREACH_SAFE(log, &g_ctrlr_C.log_head, link, log_tmp) { 541 TAILQ_REMOVE(&g_ctrlr_C.log_head, log, link); 542 free(log); 543 } 544 g_ctrlr_C.num_avail_log_pages = 0; 545 546 TAILQ_FOREACH_SAFE(ctrlr, &g_subsystem.ctrlrs, link, ctrlr_tmp) { 547 TAILQ_REMOVE(&g_subsystem.ctrlrs, ctrlr, link); 548 } 549 } 550 551 static struct spdk_nvmf_request * 552 ut_reservation_build_req(uint32_t length) 553 { 554 struct spdk_nvmf_request *req; 555 556 req = calloc(1, sizeof(*req)); 557 assert(req != NULL); 558 559 spdk_iov_one(req->iov, &req->iovcnt, calloc(1, length), length); 560 assert(req->iov[0].iov_base != NULL); 561 req->length = length; 562 563 req->cmd = (union nvmf_h2c_msg *)calloc(1, sizeof(union nvmf_h2c_msg)); 564 assert(req->cmd != NULL); 565 566 req->rsp = (union nvmf_c2h_msg *)calloc(1, sizeof(union nvmf_c2h_msg)); 567 assert(req->rsp != NULL); 568 569 return req; 570 } 571 572 static void 573 ut_reservation_free_req(struct spdk_nvmf_request *req) 574 { 575 free(req->cmd); 576 free(req->rsp); 577 free(req->iov[0].iov_base); 578 free(req); 579 } 580 581 static void 582 ut_reservation_build_register_request(struct spdk_nvmf_request *req, 583 uint8_t rrega, uint8_t iekey, 584 uint8_t cptpl, uint64_t crkey, 585 uint64_t nrkey) 586 { 587 struct spdk_nvme_reservation_register_data key; 588 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 589 590 key.crkey = crkey; 591 key.nrkey = nrkey; 592 cmd->cdw10 = 0; 593 cmd->cdw10_bits.resv_register.rrega = rrega; 594 cmd->cdw10_bits.resv_register.iekey = iekey; 595 cmd->cdw10_bits.resv_register.cptpl = cptpl; 596 memcpy(req->iov[0].iov_base, &key, sizeof(key)); 597 } 598 599 static void 600 ut_reservation_build_acquire_request(struct spdk_nvmf_request *req, 601 uint8_t racqa, uint8_t iekey, 602 uint8_t rtype, uint64_t crkey, 603 uint64_t prkey) 604 { 605 struct spdk_nvme_reservation_acquire_data key; 606 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 607 608 key.crkey = crkey; 609 key.prkey = prkey; 610 cmd->cdw10 = 0; 611 cmd->cdw10_bits.resv_acquire.racqa = racqa; 612 cmd->cdw10_bits.resv_acquire.iekey = iekey; 613 cmd->cdw10_bits.resv_acquire.rtype = rtype; 614 memcpy(req->iov[0].iov_base, &key, sizeof(key)); 615 } 616 617 static void 618 ut_reservation_build_release_request(struct spdk_nvmf_request *req, 619 uint8_t rrela, uint8_t iekey, 620 uint8_t rtype, uint64_t crkey) 621 { 622 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 623 624 cmd->cdw10 = 0; 625 cmd->cdw10_bits.resv_release.rrela = rrela; 626 cmd->cdw10_bits.resv_release.iekey = iekey; 627 cmd->cdw10_bits.resv_release.rtype = rtype; 628 memcpy(req->iov[0].iov_base, &crkey, sizeof(crkey)); 629 } 630 631 /* 632 * Construct four registrants for other test cases. 633 * 634 * g_ctrlr1_A register with key 0xa1. 635 * g_ctrlr2_A register with key 0xa1. 636 * g_ctrlr_B register with key 0xb1. 637 * g_ctrlr_C register with key 0xc1. 
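 *
 * Each successful Register command also bumps the namespace generation
 * counter (g_ns.gen). g_ctrlr2_A shares Host A's Host Identifier, so its
 * attempt to register a different key (0xa2) fails with Reservation Conflict
 * and does not add a new registrant.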
638 * */ 639 static void 640 ut_reservation_build_registrants(void) 641 { 642 struct spdk_nvmf_request *req; 643 struct spdk_nvme_cpl *rsp; 644 struct spdk_nvmf_registrant *reg; 645 uint32_t gen; 646 647 req = ut_reservation_build_req(16); 648 rsp = &req->rsp->nvme_cpl; 649 SPDK_CU_ASSERT_FATAL(req != NULL); 650 gen = g_ns.gen; 651 652 /* TEST CASE: g_ctrlr1_A register with a new key */ 653 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 654 0, 0, 0, 0xa1); 655 nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req); 656 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 657 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 658 SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1); 659 SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 1); 660 661 /* TEST CASE: g_ctrlr2_A register with a new key, because it has same 662 * Host Identifier with g_ctrlr1_A, so the register key should same. 663 */ 664 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 665 0, 0, 0, 0xa2); 666 nvmf_ns_reservation_register(&g_ns, &g_ctrlr2_A, req); 667 /* Reservation conflict for other key than 0xa1 */ 668 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT); 669 670 /* g_ctrlr_B register with a new key */ 671 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 672 0, 0, 0, 0xb1); 673 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req); 674 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 675 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid); 676 SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb1); 677 SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 2); 678 679 /* g_ctrlr_C register with a new key */ 680 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 681 0, 0, 0, 0xc1); 682 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req); 683 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 684 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid); 685 SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc1); 686 SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 3); 687 688 ut_reservation_free_req(req); 689 } 690 691 static void 692 test_reservation_register(void) 693 { 694 struct spdk_nvmf_request *req; 695 struct spdk_nvme_cpl *rsp; 696 struct spdk_nvmf_registrant *reg; 697 uint32_t gen; 698 699 ut_reservation_init(); 700 701 req = ut_reservation_build_req(16); 702 rsp = &req->rsp->nvme_cpl; 703 SPDK_CU_ASSERT_FATAL(req != NULL); 704 705 ut_reservation_build_registrants(); 706 707 /* TEST CASE: Replace g_ctrlr1_A with a new key */ 708 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY, 709 0, 0, 0xa1, 0xa11); 710 nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req); 711 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 712 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 713 SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa11); 714 715 /* TEST CASE: Host A with g_ctrlr1_A get reservation with 716 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE 717 */ 718 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 719 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xa11, 0x0); 720 gen = g_ns.gen; 721 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req); 722 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 723 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 724 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 725 SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa11); 726 SPDK_CU_ASSERT_FATAL(g_ns.holder == reg); 727 
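	/* A plain Acquire does not bump the namespace generation counter, so
	 * g_ns.gen is expected to be unchanged by the call above.
	 */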
SPDK_CU_ASSERT_FATAL(g_ns.gen == gen); 728 729 /* TEST CASE: g_ctrlr_C unregister with IEKEY enabled */ 730 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY, 731 1, 0, 0, 0); 732 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req); 733 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 734 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid); 735 SPDK_CU_ASSERT_FATAL(reg == NULL); 736 737 /* TEST CASE: g_ctrlr_B unregister with correct key */ 738 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY, 739 0, 0, 0xb1, 0); 740 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req); 741 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 742 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid); 743 SPDK_CU_ASSERT_FATAL(reg == NULL); 744 745 /* TEST CASE: No registrant now, g_ctrlr_B replace new key with IEKEY disabled */ 746 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY, 747 0, 0, 0, 0xb1); 748 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req); 749 SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS); 750 751 /* TEST CASE: No registrant now, g_ctrlr_B replace new key with IEKEY enabled */ 752 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY, 753 1, 0, 0, 0xb1); 754 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req); 755 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 756 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid); 757 SPDK_CU_ASSERT_FATAL(reg != NULL); 758 759 /* TEST CASE: g_ctrlr_B replace new key with IEKEY enabled and wrong crkey */ 760 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY, 761 1, 0, 0xff, 0xb2); 762 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req); 763 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 764 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid); 765 SPDK_CU_ASSERT_FATAL(reg != NULL); 766 SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb2); 767 768 /* TEST CASE: g_ctrlr1_A unregister with correct key, 769 * reservation should be removed as well. 
770 */ 771 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY, 772 0, 0, 0xa11, 0); 773 nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req); 774 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 775 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 776 SPDK_CU_ASSERT_FATAL(reg == NULL); 777 SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0); 778 SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0); 779 SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL); 780 781 ut_reservation_free_req(req); 782 ut_reservation_deinit(); 783 } 784 785 static void 786 test_reservation_register_with_ptpl(void) 787 { 788 struct spdk_nvmf_request *req; 789 struct spdk_nvme_cpl *rsp; 790 struct spdk_nvmf_registrant *reg; 791 bool update_sgroup = false; 792 int rc; 793 struct spdk_nvmf_reservation_info info; 794 795 ut_reservation_init(); 796 797 req = ut_reservation_build_req(16); 798 rsp = &req->rsp->nvme_cpl; 799 SPDK_CU_ASSERT_FATAL(req != NULL); 800 801 /* TEST CASE: No persistent file, register with PTPL enabled will fail */ 802 g_ns.ptpl_file = NULL; 803 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0, 804 SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1); 805 update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req); 806 SPDK_CU_ASSERT_FATAL(update_sgroup == false); 807 SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS); 808 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 809 SPDK_CU_ASSERT_FATAL(reg == NULL); 810 811 /* TEST CASE: Enable PTPL */ 812 g_ns.ptpl_file = "/tmp/Ns1PR.cfg"; 813 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0, 814 SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1); 815 update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req); 816 SPDK_CU_ASSERT_FATAL(update_sgroup == true); 817 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 818 SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true); 819 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 820 SPDK_CU_ASSERT_FATAL(reg != NULL); 821 SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, ®->hostid)); 822 /* Load reservation information from configuration file */ 823 memset(&info, 0, sizeof(info)); 824 rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info); 825 SPDK_CU_ASSERT_FATAL(rc == 0); 826 SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true); 827 828 /* TEST CASE: Disable PTPL */ 829 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 830 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0, 831 SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON, 0, 0xa1); 832 update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req); 833 SPDK_CU_ASSERT_FATAL(update_sgroup == true); 834 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 835 SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == false); 836 rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info); 837 SPDK_CU_ASSERT_FATAL(rc < 0); 838 unlink(g_ns.ptpl_file); 839 840 ut_reservation_free_req(req); 841 ut_reservation_deinit(); 842 } 843 844 static void 845 test_reservation_acquire_preempt_1(void) 846 { 847 struct spdk_nvmf_request *req; 848 struct spdk_nvme_cpl *rsp; 849 struct spdk_nvmf_registrant *reg; 850 uint32_t gen; 851 852 ut_reservation_init(); 853 854 req = ut_reservation_build_req(16); 855 rsp = &req->rsp->nvme_cpl; 856 SPDK_CU_ASSERT_FATAL(req != NULL); 857 858 ut_reservation_build_registrants(); 859 860 gen = g_ns.gen; 861 /* ACQUIRE: Host A with g_ctrlr1_A acquire reservation with 862 * type 
SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
	 */
	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
	SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa1);
	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
	SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);

	/* TEST CASE: g_ctrlr1_A holds the reservation, g_ctrlr_B preempts g_ctrlr1_A,
	 * g_ctrlr1_A's registrant is unregistered.
	 */
	gen = g_ns.gen;
	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1, 0xa1);
	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
	SPDK_CU_ASSERT_FATAL(reg == NULL);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
	SPDK_CU_ASSERT_FATAL(reg != NULL);
	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
	SPDK_CU_ASSERT_FATAL(reg != NULL);
	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
	SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);

	/* TEST CASE: g_ctrlr_B holds the reservation, g_ctrlr_C preempts g_ctrlr_B
	 * with a valid key and PRKEY set to 0; all registrants other than the host
	 * that issued the command are unregistered.
	 */
	gen = g_ns.gen;
	ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
					     SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0x0);
	nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
	SPDK_CU_ASSERT_FATAL(reg == NULL);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
	SPDK_CU_ASSERT_FATAL(reg == NULL);
	reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
	SPDK_CU_ASSERT_FATAL(reg != NULL);
	SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
	SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
	SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);

	ut_reservation_free_req(req);
	ut_reservation_deinit();
}

static void
test_reservation_acquire_release_with_ptpl(void)
{
	struct spdk_nvmf_request *req;
	struct spdk_nvme_cpl *rsp;
	struct spdk_nvmf_registrant *reg;
	bool update_sgroup = false;
	struct spdk_uuid holder_uuid;
	int rc;
	struct spdk_nvmf_reservation_info info;

	ut_reservation_init();

	req = ut_reservation_build_req(16);
	rsp = &req->rsp->nvme_cpl;
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* TEST CASE: Enable PTPL */
	g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
	ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
					      SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
	update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
	SPDK_CU_ASSERT_FATAL(update_sgroup == true);
	SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
	SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
	reg = nvmf_ns_reservation_get_registrant(&g_ns,
&g_ctrlr1_A.hostid); 941 SPDK_CU_ASSERT_FATAL(reg != NULL); 942 SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, ®->hostid)); 943 /* Load reservation information from configuration file */ 944 memset(&info, 0, sizeof(info)); 945 rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info); 946 SPDK_CU_ASSERT_FATAL(rc == 0); 947 SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true); 948 949 /* TEST CASE: Acquire the reservation */ 950 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 951 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 952 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0); 953 update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req); 954 SPDK_CU_ASSERT_FATAL(update_sgroup == true); 955 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 956 memset(&info, 0, sizeof(info)); 957 rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info); 958 SPDK_CU_ASSERT_FATAL(rc == 0); 959 SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true); 960 SPDK_CU_ASSERT_FATAL(info.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 961 SPDK_CU_ASSERT_FATAL(info.crkey == 0xa1); 962 spdk_uuid_parse(&holder_uuid, info.holder_uuid); 963 SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &holder_uuid)); 964 965 /* TEST CASE: Release the reservation */ 966 rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD; 967 ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0, 968 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1); 969 update_sgroup = nvmf_ns_reservation_release(&g_ns, &g_ctrlr1_A, req); 970 SPDK_CU_ASSERT_FATAL(update_sgroup == true); 971 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 972 memset(&info, 0, sizeof(info)); 973 rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info); 974 SPDK_CU_ASSERT_FATAL(rc == 0); 975 SPDK_CU_ASSERT_FATAL(info.rtype == 0); 976 SPDK_CU_ASSERT_FATAL(info.crkey == 0); 977 SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true); 978 unlink(g_ns.ptpl_file); 979 980 ut_reservation_free_req(req); 981 ut_reservation_deinit(); 982 } 983 984 static void 985 test_reservation_release(void) 986 { 987 struct spdk_nvmf_request *req; 988 struct spdk_nvme_cpl *rsp; 989 struct spdk_nvmf_registrant *reg; 990 991 ut_reservation_init(); 992 993 req = ut_reservation_build_req(16); 994 rsp = &req->rsp->nvme_cpl; 995 SPDK_CU_ASSERT_FATAL(req != NULL); 996 997 ut_reservation_build_registrants(); 998 999 /* ACQUIRE: Host A with g_ctrlr1_A get reservation with 1000 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS 1001 */ 1002 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 1003 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xa1, 0x0); 1004 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req); 1005 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1006 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 1007 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1008 SPDK_CU_ASSERT_FATAL(g_ns.holder == reg); 1009 1010 /* Test Case: Host B release the reservation */ 1011 ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0, 1012 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1); 1013 nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req); 1014 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1015 SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0); 1016 SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0); 1017 SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL); 1018 1019 /* Test Case: Host C clear the registrants */ 1020 ut_reservation_build_release_request(req, 
SPDK_NVME_RESERVE_CLEAR, 0, 1021 0, 0xc1); 1022 nvmf_ns_reservation_release(&g_ns, &g_ctrlr_C, req); 1023 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1024 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid); 1025 SPDK_CU_ASSERT_FATAL(reg == NULL); 1026 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid); 1027 SPDK_CU_ASSERT_FATAL(reg == NULL); 1028 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid); 1029 SPDK_CU_ASSERT_FATAL(reg == NULL); 1030 reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid); 1031 SPDK_CU_ASSERT_FATAL(reg == NULL); 1032 1033 ut_reservation_free_req(req); 1034 ut_reservation_deinit(); 1035 } 1036 1037 void 1038 nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr, 1039 struct spdk_nvmf_ns *ns, 1040 enum spdk_nvme_reservation_notification_log_page_type type) 1041 { 1042 ctrlr->num_avail_log_pages++; 1043 } 1044 1045 static void 1046 test_reservation_unregister_notification(void) 1047 { 1048 struct spdk_nvmf_request *req; 1049 struct spdk_nvme_cpl *rsp; 1050 1051 ut_reservation_init(); 1052 1053 req = ut_reservation_build_req(16); 1054 SPDK_CU_ASSERT_FATAL(req != NULL); 1055 rsp = &req->rsp->nvme_cpl; 1056 1057 ut_reservation_build_registrants(); 1058 1059 /* ACQUIRE: Host B with g_ctrlr_B get reservation with 1060 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY 1061 */ 1062 rsp->status.sc = 0xff; 1063 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 1064 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0); 1065 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req); 1066 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1067 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1068 1069 /* Test Case : g_ctrlr_B holds the reservation, g_ctrlr_B unregister the registration. 1070 * Reservation release notification sends to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C only for 1071 * SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY or SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY 1072 * type. 
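 *
 * The unregistering host itself receives no notification, which is why
 * g_ctrlr_B's num_avail_log_pages is expected to stay at 5 below.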
1073 */ 1074 rsp->status.sc = 0xff; 1075 g_ctrlr1_A.num_avail_log_pages = 0; 1076 g_ctrlr2_A.num_avail_log_pages = 0; 1077 g_ctrlr_B.num_avail_log_pages = 5; 1078 g_ctrlr_C.num_avail_log_pages = 0; 1079 ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY, 1080 0, 0, 0xb1, 0); 1081 nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req); 1082 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1083 SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0); 1084 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages); 1085 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages); 1086 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages); 1087 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages); 1088 1089 ut_reservation_free_req(req); 1090 ut_reservation_deinit(); 1091 } 1092 1093 static void 1094 test_reservation_release_notification(void) 1095 { 1096 struct spdk_nvmf_request *req; 1097 struct spdk_nvme_cpl *rsp; 1098 1099 ut_reservation_init(); 1100 1101 req = ut_reservation_build_req(16); 1102 SPDK_CU_ASSERT_FATAL(req != NULL); 1103 rsp = &req->rsp->nvme_cpl; 1104 1105 ut_reservation_build_registrants(); 1106 1107 /* ACQUIRE: Host B with g_ctrlr_B get reservation with 1108 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY 1109 */ 1110 rsp->status.sc = 0xff; 1111 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 1112 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0); 1113 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req); 1114 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1115 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1116 1117 /* Test Case : g_ctrlr_B holds the reservation, g_ctrlr_B release the reservation. 1118 * Reservation release notification sends to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C. 
1119 */ 1120 rsp->status.sc = 0xff; 1121 g_ctrlr1_A.num_avail_log_pages = 0; 1122 g_ctrlr2_A.num_avail_log_pages = 0; 1123 g_ctrlr_B.num_avail_log_pages = 5; 1124 g_ctrlr_C.num_avail_log_pages = 0; 1125 ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0, 1126 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1); 1127 nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req); 1128 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1129 SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0); 1130 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages); 1131 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages); 1132 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages); 1133 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages); 1134 1135 ut_reservation_free_req(req); 1136 ut_reservation_deinit(); 1137 } 1138 1139 static void 1140 test_reservation_release_notification_write_exclusive(void) 1141 { 1142 struct spdk_nvmf_request *req; 1143 struct spdk_nvme_cpl *rsp; 1144 1145 ut_reservation_init(); 1146 1147 req = ut_reservation_build_req(16); 1148 SPDK_CU_ASSERT_FATAL(req != NULL); 1149 rsp = &req->rsp->nvme_cpl; 1150 1151 ut_reservation_build_registrants(); 1152 1153 /* ACQUIRE: Host B with g_ctrlr_B get reservation with 1154 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE 1155 */ 1156 rsp->status.sc = 0xff; 1157 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 1158 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1, 0x0); 1159 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req); 1160 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1161 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1162 1163 /* Test Case : g_ctrlr_B holds the reservation, g_ctrlr_B release the reservation. 1164 * Because the reservation type is SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 1165 * no reservation notification occurs. 
1166 */ 1167 rsp->status.sc = 0xff; 1168 g_ctrlr1_A.num_avail_log_pages = 5; 1169 g_ctrlr2_A.num_avail_log_pages = 5; 1170 g_ctrlr_B.num_avail_log_pages = 5; 1171 g_ctrlr_C.num_avail_log_pages = 5; 1172 ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0, 1173 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1); 1174 nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req); 1175 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1176 SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0); 1177 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr1_A.num_avail_log_pages); 1178 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr2_A.num_avail_log_pages); 1179 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages); 1180 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages); 1181 1182 ut_reservation_free_req(req); 1183 ut_reservation_deinit(); 1184 } 1185 1186 static void 1187 test_reservation_clear_notification(void) 1188 { 1189 struct spdk_nvmf_request *req; 1190 struct spdk_nvme_cpl *rsp; 1191 1192 ut_reservation_init(); 1193 1194 req = ut_reservation_build_req(16); 1195 SPDK_CU_ASSERT_FATAL(req != NULL); 1196 rsp = &req->rsp->nvme_cpl; 1197 1198 ut_reservation_build_registrants(); 1199 1200 /* ACQUIRE: Host B with g_ctrlr_B get reservation with 1201 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY 1202 */ 1203 rsp->status.sc = 0xff; 1204 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 1205 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0); 1206 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req); 1207 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1208 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1209 1210 /* Test Case : g_ctrlr_B holds the reservation, g_ctrlr_B clear the reservation. 1211 * Reservation Preempted notification sends to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C. 
1212 */ 1213 rsp->status.sc = 0xff; 1214 g_ctrlr1_A.num_avail_log_pages = 0; 1215 g_ctrlr2_A.num_avail_log_pages = 0; 1216 g_ctrlr_B.num_avail_log_pages = 5; 1217 g_ctrlr_C.num_avail_log_pages = 0; 1218 ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0, 1219 0, 0xb1); 1220 nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req); 1221 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1222 SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0); 1223 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages); 1224 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages); 1225 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages); 1226 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages); 1227 1228 ut_reservation_free_req(req); 1229 ut_reservation_deinit(); 1230 } 1231 1232 static void 1233 test_reservation_preempt_notification(void) 1234 { 1235 struct spdk_nvmf_request *req; 1236 struct spdk_nvme_cpl *rsp; 1237 1238 ut_reservation_init(); 1239 1240 req = ut_reservation_build_req(16); 1241 SPDK_CU_ASSERT_FATAL(req != NULL); 1242 rsp = &req->rsp->nvme_cpl; 1243 1244 ut_reservation_build_registrants(); 1245 1246 /* ACQUIRE: Host B with g_ctrlr_B get reservation with 1247 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY 1248 */ 1249 rsp->status.sc = 0xff; 1250 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0, 1251 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0); 1252 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req); 1253 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1254 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY); 1255 1256 /* Test Case : g_ctrlr_B holds the reservation, g_ctrlr_C preempt g_ctrlr_B, 1257 * g_ctrlr_B registrant is unregistered, and reservation is preempted. 1258 * Registration Preempted notification sends to g_ctrlr_B. 1259 * Reservation Preempted notification sends to g_ctrlr1_A/g_ctrlr2_A. 
1260 */ 1261 rsp->status.sc = 0xff; 1262 g_ctrlr1_A.num_avail_log_pages = 0; 1263 g_ctrlr2_A.num_avail_log_pages = 0; 1264 g_ctrlr_B.num_avail_log_pages = 0; 1265 g_ctrlr_C.num_avail_log_pages = 5; 1266 ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0, 1267 SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0xb1); 1268 nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req); 1269 SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS); 1270 SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS); 1271 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages); 1272 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages); 1273 SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_B.num_avail_log_pages); 1274 SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages); 1275 1276 ut_reservation_free_req(req); 1277 ut_reservation_deinit(); 1278 } 1279 1280 static int 1281 nvmf_tgt_create_poll_group(void *io_device, void *ctx_buf) 1282 { 1283 return 0; 1284 } 1285 1286 static void 1287 nvmf_tgt_destroy_poll_group(void *io_device, void *ctx_buf) 1288 { 1289 } 1290 1291 static void 1292 test_spdk_nvmf_ns_event(void) 1293 { 1294 struct spdk_nvmf_tgt tgt = {}; 1295 struct spdk_nvmf_subsystem subsystem = { 1296 .max_nsid = 1024, 1297 .ns = NULL, 1298 .tgt = &tgt, 1299 }; 1300 struct spdk_nvmf_ctrlr ctrlr = { 1301 .subsys = &subsystem 1302 }; 1303 struct spdk_nvmf_ns_opts ns_opts; 1304 uint32_t nsid; 1305 struct spdk_bdev *bdev; 1306 1307 subsystem.ns = calloc(subsystem.max_nsid, sizeof(struct spdk_nvmf_subsystem_ns *)); 1308 SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL); 1309 subsystem.ana_group = calloc(subsystem.max_nsid, sizeof(uint32_t)); 1310 SPDK_CU_ASSERT_FATAL(subsystem.ana_group != NULL); 1311 1312 tgt.max_subsystems = 1024; 1313 tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems); 1314 RB_INIT(&tgt.subsystems); 1315 1316 spdk_io_device_register(&tgt, 1317 nvmf_tgt_create_poll_group, 1318 nvmf_tgt_destroy_poll_group, 1319 sizeof(struct spdk_nvmf_poll_group), 1320 NULL); 1321 1322 /* Add one namespace */ 1323 spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts)); 1324 nsid = spdk_nvmf_subsystem_add_ns_ext(&subsystem, "bdev1", &ns_opts, sizeof(ns_opts), NULL); 1325 CU_ASSERT(nsid == 1); 1326 CU_ASSERT(NULL != subsystem.ns[0]); 1327 CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &g_bdevs[nsid - 1]); 1328 1329 bdev = subsystem.ns[nsid - 1]->bdev; 1330 1331 /* Add one controller */ 1332 TAILQ_INIT(&subsystem.ctrlrs); 1333 TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlr, link); 1334 1335 /* Namespace resize event */ 1336 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1337 g_ns_changed_nsid = 0xFFFFFFFF; 1338 g_ns_changed_ctrlr = NULL; 1339 nvmf_ns_event(SPDK_BDEV_EVENT_RESIZE, bdev, subsystem.ns[0]); 1340 CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state); 1341 1342 poll_threads(); 1343 CU_ASSERT(1 == g_ns_changed_nsid); 1344 CU_ASSERT(&ctrlr == g_ns_changed_ctrlr); 1345 CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state); 1346 1347 /* Namespace remove event */ 1348 subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE; 1349 g_ns_changed_nsid = 0xFFFFFFFF; 1350 g_ns_changed_ctrlr = NULL; 1351 nvmf_ns_event(SPDK_BDEV_EVENT_REMOVE, bdev, subsystem.ns[0]); 1352 CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state); 1353 CU_ASSERT(0xFFFFFFFF == g_ns_changed_nsid); 1354 CU_ASSERT(NULL == g_ns_changed_ctrlr); 1355 1356 poll_threads(); 1357 CU_ASSERT(1 == g_ns_changed_nsid); 1358 CU_ASSERT(&ctrlr == g_ns_changed_ctrlr); 1359 CU_ASSERT(NULL == subsystem.ns[0]); 1360 
CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);

	spdk_io_device_unregister(&tgt, NULL);

	poll_threads();

	free(subsystem.ns);
	free(subsystem.ana_group);
	spdk_bit_array_free(&tgt.subsystem_ids);
}

static void
test_nvmf_ns_reservation_add_remove_registrant(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_registrant *reg = NULL;
	int rc;

	TAILQ_INIT(&ns.registrants);
	spdk_uuid_generate(&ctrlr.hostid);

	rc = nvmf_ns_reservation_add_registrant(&ns, &ctrlr, 0xa11);
	CU_ASSERT(rc == 0);
	reg = TAILQ_FIRST(&ns.registrants);
	SPDK_CU_ASSERT_FATAL(reg != NULL);
	CU_ASSERT(ns.gen == 1);
	CU_ASSERT(reg->rkey == 0xa11);
	/* hostid is a binary UUID, so compare it with memcmp() rather than strncmp() */
	CU_ASSERT(!memcmp(&reg->hostid, &ctrlr.hostid, sizeof(ctrlr.hostid)));

	nvmf_ns_reservation_remove_registrant(&ns, reg);
	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));
	CU_ASSERT(ns.gen == 2);
}

static void
test_nvmf_subsystem_destroy_cb(void *cb_arg)
{
}

static void
test_nvmf_subsystem_add_ctrlr(void)
{
	int rc;
	struct spdk_nvmf_ctrlr ctrlr = {};
	struct spdk_nvmf_tgt tgt = {};
	char nqn[256] = "nqn.2016-06.io.spdk:subsystem1";
	struct spdk_nvmf_subsystem *subsystem = NULL;

	tgt.max_subsystems = 1024;
	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
	RB_INIT(&tgt.subsystems);

	subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
	ctrlr.subsys = subsystem;

	ctrlr.dynamic_ctrlr = true;
	rc = nvmf_subsystem_add_ctrlr(subsystem, &ctrlr);
	CU_ASSERT(rc == 0);
	CU_ASSERT(!TAILQ_EMPTY(&subsystem->ctrlrs));
	CU_ASSERT(ctrlr.cntlid == 1);
	CU_ASSERT(nvmf_subsystem_get_ctrlr(subsystem, 1) == &ctrlr);

	nvmf_subsystem_remove_ctrlr(subsystem, &ctrlr);
	CU_ASSERT(TAILQ_EMPTY(&subsystem->ctrlrs));
	rc = spdk_nvmf_subsystem_destroy(subsystem, test_nvmf_subsystem_destroy_cb, NULL);
	CU_ASSERT(rc == 0);
	spdk_bit_array_free(&tgt.subsystem_ids);
}

static void
_add_transport_cb(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

static int
transport_subsystem_add_host_err(struct spdk_nvmf_transport *transport,
				 const struct spdk_nvmf_subsystem *subsystem,
				 const char *hostnqn,
				 const struct spdk_json_val *transport_specific)
{
	return -1;
}

void
spdk_nvmf_tgt_add_transport(struct spdk_nvmf_tgt *tgt,
			    struct spdk_nvmf_transport *transport,
			    spdk_nvmf_tgt_add_transport_done_fn cb_fn,
			    void *cb_arg)
{
	TAILQ_INSERT_TAIL(&tgt->transports, transport, link);
}

static struct spdk_nvmf_transport *
transport_create(struct spdk_nvmf_transport_opts *opts)
{
	return &g_transport;
}

static void
test_spdk_nvmf_subsystem_add_host(void)
{
	struct spdk_nvmf_tgt tgt = {};
	struct spdk_nvmf_subsystem *subsystem = NULL;
	int rc;
	const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
	const char subsystemnqn[] = "nqn.2016-06.io.spdk:subsystem1";
	struct spdk_nvmf_transport_opts opts = {
		.opts_size = sizeof(struct spdk_nvmf_transport_opts),
		.io_unit_size = 8192
	};
	const struct spdk_nvmf_transport_ops test_ops = {
		.name = "transport_ut",
		.create = transport_create,
.subsystem_add_host = transport_subsystem_add_host_err, 1477 }; 1478 struct spdk_nvmf_transport *transport; 1479 1480 tgt.max_subsystems = 1024; 1481 tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems); 1482 RB_INIT(&tgt.subsystems); 1483 1484 subsystem = spdk_nvmf_subsystem_create(&tgt, subsystemnqn, SPDK_NVMF_SUBTYPE_NVME, 0); 1485 SPDK_CU_ASSERT_FATAL(subsystem != NULL); 1486 CU_ASSERT_STRING_EQUAL(subsystem->subnqn, subsystemnqn); 1487 1488 rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL); 1489 CU_ASSERT(rc == 0); 1490 CU_ASSERT(!TAILQ_EMPTY(&subsystem->hosts)); 1491 1492 /* Add existing nqn, this function is allowed to be called if the nqn was previously added. */ 1493 rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL); 1494 CU_ASSERT(rc == 0); 1495 1496 rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn); 1497 CU_ASSERT(rc == 0); 1498 CU_ASSERT(TAILQ_EMPTY(&subsystem->hosts)); 1499 1500 /* No available nqn */ 1501 rc = spdk_nvmf_subsystem_remove_host(subsystem, hostnqn); 1502 CU_ASSERT(rc == -ENOENT); 1503 1504 /* Ensure hostnqn list remains empty after transport callback fails */ 1505 spdk_nvmf_transport_register(&test_ops); 1506 transport = spdk_nvmf_transport_create("transport_ut", &opts); 1507 SPDK_CU_ASSERT_FATAL(transport != NULL); 1508 1509 TAILQ_INIT(&tgt.transports); 1510 spdk_nvmf_tgt_add_transport(&tgt, transport, _add_transport_cb, 0); 1511 1512 rc = spdk_nvmf_subsystem_add_host(subsystem, hostnqn, NULL); 1513 CU_ASSERT(rc != 0); 1514 CU_ASSERT(TAILQ_EMPTY(&subsystem->hosts)); 1515 1516 spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL); 1517 spdk_bit_array_free(&tgt.subsystem_ids); 1518 } 1519 1520 static void 1521 test_nvmf_ns_reservation_report(void) 1522 { 1523 struct spdk_nvmf_ns ns = {}; 1524 struct spdk_nvmf_ctrlr ctrlr = {}; 1525 struct spdk_nvmf_request req = {}; 1526 union nvmf_h2c_msg cmd = {}; 1527 union nvmf_c2h_msg rsp = {}; 1528 struct spdk_nvme_registered_ctrlr_extended_data *ctrlr_data; 1529 struct spdk_nvme_reservation_status_extended_data *status_data; 1530 struct spdk_nvmf_registrant *reg; 1531 void *data; 1532 1533 data = calloc(1, sizeof(*status_data) + sizeof(*ctrlr_data) * 2); 1534 reg = calloc(2, sizeof(struct spdk_nvmf_registrant)); 1535 SPDK_CU_ASSERT_FATAL(data != NULL && reg != NULL); 1536 1537 req.length = sizeof(*status_data) + sizeof(*ctrlr_data) * 2; 1538 spdk_iov_one(req.iov, &req.iovcnt, data, req.length); 1539 1540 req.cmd = &cmd; 1541 req.rsp = &rsp; 1542 ns.gen = 1; 1543 ns.rtype = SPDK_NVME_RESERVE_WRITE_EXCLUSIVE; 1544 ns.ptpl_activated = true; 1545 cmd.nvme_cmd.cdw11_bits.resv_report.eds = true; 1546 cmd.nvme_cmd.cdw10 = 100; 1547 reg[0].rkey = 0xa; 1548 reg[1].rkey = 0xb; 1549 spdk_uuid_generate(®[0].hostid); 1550 spdk_uuid_generate(®[1].hostid); 1551 TAILQ_INIT(&ns.registrants); 1552 TAILQ_INSERT_TAIL(&ns.registrants, ®[0], link); 1553 TAILQ_INSERT_TAIL(&ns.registrants, ®[1], link); 1554 1555 nvmf_ns_reservation_report(&ns, &ctrlr, &req); 1556 CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC); 1557 CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS); 1558 /* Get ctrlr data and status data pointers */ 1559 ctrlr_data = (void *)((char *)req.iov[0].iov_base + sizeof(*status_data)); 1560 status_data = (void *)req.iov[0].iov_base; 1561 SPDK_CU_ASSERT_FATAL(status_data != NULL && ctrlr_data != NULL); 1562 CU_ASSERT(status_data->data.gen == 1); 1563 CU_ASSERT(status_data->data.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE); 1564 CU_ASSERT(status_data->data.ptpls == true); 1565 
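	/* With EDS set, the payload is the extended data structure: the status
	 * data header followed by one extended registered-controller entry per
	 * registrant, in registration order.
	 */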
	CU_ASSERT(status_data->data.regctl == 2);
	CU_ASSERT(ctrlr_data->cntlid == 0xffff);
	CU_ASSERT(ctrlr_data->rcsts.status == false);
	CU_ASSERT(ctrlr_data->rkey == 0xa);
	CU_ASSERT(!spdk_uuid_compare((struct spdk_uuid *)ctrlr_data->hostid, &reg[0].hostid));
	/* Check second ctrlr data */
	ctrlr_data++;
	CU_ASSERT(ctrlr_data->cntlid == 0xffff);
	CU_ASSERT(ctrlr_data->rcsts.status == false);
	CU_ASSERT(ctrlr_data->rkey == 0xb);
	CU_ASSERT(!spdk_uuid_compare((struct spdk_uuid *)ctrlr_data->hostid, &reg[1].hostid));

	/* eds = 0: the host did not request the extended data structure, which this
	 * controller requires, so the report fails.
	 */
	spdk_iov_memset(req.iov, req.iovcnt, 0);
	memset(req.rsp, 0, sizeof(*req.rsp));
	cmd.nvme_cmd.cdw11_bits.resv_report.eds = false;

	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_HOSTID_INCONSISTENT_FORMAT);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);

	/* Transfer length invalid */
	spdk_iov_memset(req.iov, req.iovcnt, 0);
	memset(req.rsp, 0, sizeof(*req.rsp));
	cmd.nvme_cmd.cdw11_bits.resv_report.eds = true;
	cmd.nvme_cmd.cdw10 = 0;

	nvmf_ns_reservation_report(&ns, &ctrlr, &req);
	CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INTERNAL_DEVICE_ERROR);
	CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);

	free(req.iov[0].iov_base);
	free(reg);
}

static void
test_nvmf_nqn_is_valid(void)
{
	bool rc;
	char uuid[SPDK_NVMF_UUID_STRING_LEN + 1] = {};
	char nqn[SPDK_NVMF_NQN_MAX_LEN + 1] = {};
	struct spdk_uuid s_uuid = {};

	spdk_uuid_generate(&s_uuid);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);

	/* discovery nqn */
	snprintf(nqn, sizeof(nqn), "%s", SPDK_NVMF_DISCOVERY_NQN);

	rc = nvmf_nqn_is_valid(nqn);
	CU_ASSERT(rc == true);

	/* nqn with uuid */
	memset(nqn, 0xff, sizeof(nqn));
	snprintf(nqn, sizeof(nqn), "%s%s", SPDK_NVMF_NQN_UUID_PRE, uuid);

	rc = nvmf_nqn_is_valid(nqn);
	CU_ASSERT(rc == true);

	/* Valid nqn with a reverse-domain name */
	memset(nqn, 0xff, sizeof(nqn));
	snprintf(nqn, sizeof(nqn), "%s", "nqn.2016-06.io.spdk:cnode1");

	rc = nvmf_nqn_is_valid(nqn);
	CU_ASSERT(rc == true);

	/* Invalid nqn length */
	memset(nqn, 0xff, sizeof(nqn));
	snprintf(nqn, sizeof(nqn), "%s", "nqn.");

	rc = nvmf_nqn_is_valid(nqn);
	CU_ASSERT(rc == false);

	/* Copy the uuid into the nqn string, but omit the last character to make it invalid */
	memset(nqn, 0, SPDK_NVMF_NQN_MAX_LEN + 1);
	snprintf(nqn, sizeof(nqn), "%s", SPDK_NVMF_NQN_UUID_PRE);
	memcpy(&nqn[SPDK_NVMF_NQN_UUID_PRE_LEN], uuid, SPDK_NVMF_UUID_STRING_LEN - 1);

	rc = nvmf_nqn_is_valid(nqn);
	CU_ASSERT(rc == false);

	/* Invalid domain */
	memset(nqn, 0xff, SPDK_NVMF_NQN_MAX_LEN + 1);
	snprintf(nqn, sizeof(nqn), "%s", "nqn.2016-06.io...spdk:cnode1");

	rc = nvmf_nqn_is_valid(nqn);
	CU_ASSERT(rc == false);
}

static void
test_nvmf_ns_reservation_restore(void)
{
	struct spdk_nvmf_ns ns = {};
	struct spdk_nvmf_reservation_info info = {};
	struct spdk_bdev bdev = {};
	struct spdk_uuid s_uuid = {};
	struct spdk_nvmf_registrant *reg0, *reg1;
	char uuid[SPDK_UUID_STRING_LEN] = {};
	int rc;

	ns.bdev = &bdev;
	TAILQ_INIT(&ns.registrants);
	info.ptpl_activated = true;
	info.num_regs = 2;
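	/* The remaining fields describe a Write Exclusive - All Registrants reservation
	 * with registrant keys 0xb and 0xc, as nvmf_ns_reservation_restore() would read
	 * them from a persist-through-power-loss (PTPL) configuration.
	 */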
	info.rtype = SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS;
	info.registrants[0].rkey = 0xb;
	info.registrants[1].rkey = 0xc;

	/* Generate and prepare uuids, and make sure the bdev and info UUIDs are the same */
	spdk_uuid_generate(&s_uuid);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
	snprintf(info.holder_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
	snprintf(info.bdev_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
	snprintf(info.registrants[0].host_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
	spdk_uuid_copy(&bdev.uuid, &s_uuid);
	spdk_uuid_generate(&s_uuid);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
	snprintf(info.registrants[1].host_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);

	/* info->crkey does not exist in the registrants */
	info.crkey = 0xa;

	rc = nvmf_ns_reservation_restore(&ns, &info);
	CU_ASSERT(rc == -EINVAL);

	/* info->crkey exists in the registrants */
	info.crkey = 0xb;

	rc = nvmf_ns_reservation_restore(&ns, &info);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ns.crkey == 0xb);
	CU_ASSERT(ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
	CU_ASSERT(ns.ptpl_activated == true);
	/* Check the two registrants' rkeys */
	reg0 = TAILQ_FIRST(&ns.registrants);
	reg1 = TAILQ_NEXT(reg0, link);
	CU_ASSERT(ns.holder == reg0);
	CU_ASSERT(reg0->rkey == 0xb);
	CU_ASSERT(reg1->rkey == 0xc);

	rc = nvmf_ns_reservation_clear_all_registrants(&ns);
	CU_ASSERT(rc == 2);
	CU_ASSERT(TAILQ_EMPTY(&ns.registrants));

	/* Existing bdev UUID differs from the one in the configuration */
	spdk_uuid_generate(&s_uuid);
	spdk_uuid_fmt_lower(uuid, sizeof(uuid), &s_uuid);
	snprintf(info.bdev_uuid, SPDK_UUID_STRING_LEN, "%s", uuid);
	spdk_uuid_generate(&s_uuid);
	spdk_uuid_copy(&bdev.uuid, &s_uuid);

	rc = nvmf_ns_reservation_restore(&ns, &info);
	CU_ASSERT(rc == -EINVAL);
}

static void
test_nvmf_subsystem_state_change(void)
{
	struct spdk_nvmf_tgt tgt = {};
	struct spdk_nvmf_subsystem *subsystem, *discovery_subsystem;
	int rc;

	tgt.max_subsystems = 1024;
	tgt.subsystem_ids = spdk_bit_array_create(tgt.max_subsystems);
	RB_INIT(&tgt.subsystems);

	discovery_subsystem = spdk_nvmf_subsystem_create(&tgt, SPDK_NVMF_DISCOVERY_NQN,
			      SPDK_NVMF_SUBTYPE_DISCOVERY_CURRENT, 0);
	SPDK_CU_ASSERT_FATAL(discovery_subsystem != NULL);
	subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
					       SPDK_NVMF_SUBTYPE_NVME, 0);
	SPDK_CU_ASSERT_FATAL(subsystem != NULL);

	spdk_io_device_register(&tgt,
				nvmf_tgt_create_poll_group,
				nvmf_tgt_destroy_poll_group,
				sizeof(struct spdk_nvmf_poll_group),
				NULL);

	rc = spdk_nvmf_subsystem_start(discovery_subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(discovery_subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);
	rc = spdk_nvmf_subsystem_start(subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_ACTIVE);

	rc = spdk_nvmf_subsystem_pause(subsystem, SPDK_NVME_GLOBAL_NS_TAG, NULL, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
	CU_ASSERT(rc == -EBUSY);
	poll_threads();
	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_PAUSED);

	rc = spdk_nvmf_subsystem_stop(discovery_subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
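	/* The stop request completes asynchronously; after polling, the discovery
	 * subsystem should be inactive. The paused NVMe subsystem is stopped next.
	 */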
	CU_ASSERT(discovery_subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);
	rc = spdk_nvmf_subsystem_stop(subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(subsystem->state == SPDK_NVMF_SUBSYSTEM_INACTIVE);

	rc = spdk_nvmf_subsystem_destroy(subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);
	rc = spdk_nvmf_subsystem_destroy(discovery_subsystem, NULL, NULL);
	CU_ASSERT(rc == 0);

	spdk_io_device_unregister(&tgt, NULL);
	poll_threads();

	spdk_bit_array_free(&tgt.subsystem_ids);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, nvmf_test_create_subsystem);
	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_ns);
	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_set_sn);
	CU_ADD_TEST(suite, test_reservation_register);
	CU_ADD_TEST(suite, test_reservation_register_with_ptpl);
	CU_ADD_TEST(suite, test_reservation_acquire_preempt_1);
	CU_ADD_TEST(suite, test_reservation_acquire_release_with_ptpl);
	CU_ADD_TEST(suite, test_reservation_release);
	CU_ADD_TEST(suite, test_reservation_unregister_notification);
	CU_ADD_TEST(suite, test_reservation_release_notification);
	CU_ADD_TEST(suite, test_reservation_release_notification_write_exclusive);
	CU_ADD_TEST(suite, test_reservation_clear_notification);
	CU_ADD_TEST(suite, test_reservation_preempt_notification);
	CU_ADD_TEST(suite, test_spdk_nvmf_ns_event);
	CU_ADD_TEST(suite, test_nvmf_ns_reservation_add_remove_registrant);
	CU_ADD_TEST(suite, test_nvmf_subsystem_add_ctrlr);
	CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_host);
	CU_ADD_TEST(suite, test_nvmf_ns_reservation_report);
	CU_ADD_TEST(suite, test_nvmf_nqn_is_valid);
	CU_ADD_TEST(suite, test_nvmf_ns_reservation_restore);
	CU_ADD_TEST(suite, test_nvmf_subsystem_state_change);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	free_threads();

	return num_failures;
}