/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)
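
/*
 * Note on test structure: ctrlr.c and tcp.c are #included directly above so
 * that their static functions are visible to the test cases. External
 * dependencies are satisfied by the DEFINE_STUB()/DEFINE_STUB_V() mock macros
 * (declared in spdk_internal/mock.h, pulled in through the common test code),
 * which generate a function returning a canned value that can be overridden
 * per test with MOCK_SET()/MOCK_CLEAR_P(). As a rough sketch, not the literal
 * macro expansion, a stub declared as
 *
 *     DEFINE_STUB(fn, int, (int arg), 0)
 *
 * produces something along the lines of
 *
 *     int fn(int arg) { HANDLE_RETURN_MOCK(fn); return 0; }
 */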

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* The official trstring is the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length of one io_unit_size or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req2 (already inserted into the TCP_REQUEST_STATE_FREE queue above) */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu and make it require an SGL buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this req takes precedence over the next one. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair's capsule cmd, but tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of the tqpair's resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: the received ICReq PFV differs from the expected 0. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: the received ICReq HPDA is outside the expected range 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu and make it require an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up a valid SGL; the failure should come from the bidirectional xfer type */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu and make it require an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up the SGL so that nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}
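
/*
 * nvmf_tcp_pdu_ch_handle() validates the common PDU header: the pdu_type must
 * be legal for the current qpair state (only an ICReq is accepted before
 * initialization completes), hlen/plen must be consistent with that type, and
 * pdo must respect the CPDA alignment negotiated via ICReq. Each failing case
 * below violates one of those rules and checks that a C2H termination request
 * is prepared; the FEI asserted in each case points at the offending
 * common-header byte (hlen at offset 2, pdo at 3, plen at 4). The final case
 * is valid and passes.
 */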
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received an ICReq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The tqpair's TCP connection is not yet negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters are valid. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
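
	/*
	 * For reference: the psk constant above follows the TLS PSK interchange
	 * format, roughly "NVMeTLSkey-1:01:<base64 of configured PSK + CRC32>:",
	 * where the second field selects the hash used to derive the retained
	 * PSK (01 = SHA-256, 02 = SHA-384). This is only a summary; the
	 * authoritative rules are in the NVMe/TCP transport spec and in the
	 * interchange-PSK parsing code shared with the NVMe TCP initiator.
	 */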
	/* Create a text file containing the PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check that we can generate the expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit the PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with an unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs differ when the input PSKs differ and the hash is the same. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs differ when the hash differs and the input PSK is the same. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown hash value makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer of insufficient size makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown cipher suite makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer of insufficient size makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}