/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

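/*
 * Note: the DEFINE_STUB()/DEFINE_STUB_V() shims in this file only satisfy
 * symbols referenced by the included ctrlr.c and tcp.c sources. A stub's
 * return value can be overridden per test with MOCK_SET()/MOCK_CLEAR_P()
 * from test_env.c, e.g. test_nvmf_tcp_poll_group_create() below mocks
 * spdk_sock_group_create() this way.
 */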
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

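/*
 * Local override of the real buffer allocator: any request of io_unit_size
 * or larger is rejected with -EINVAL, which lets the tests below drive both
 * the successful-allocation path and the pending_buf_queue path.
 */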
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Requests of one io_unit_size or larger will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

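/*
 * The transport create/destroy paths run work on the current SPDK thread,
 * so each test below allocates a thread, runs on it, and polls it to
 * completion before destroying it.
 */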
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success: io_unit_size is capped at max_io_size */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

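/*
 * Builds a 300-byte, 3-iovec request and verifies the generated C2H DATA
 * PDU header and iovec mapping; the subsequent calls check that the
 * SUCCESS flag is cleared when cdw0 is non-zero or c2h_success is disabled.
 */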
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

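/*
 * The H2C DATA header below matches the prepared request (cccid, ttag and
 * datal all agree), so the handler should map the PDU payload onto the
 * request's iovecs.
 */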
static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Initialize an idle tcp_req on the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it needs an SGL data buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this request takes precedence over the next one. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the qpair's capsule command; the first request still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

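/*
 * Verifies the defaults applied by nvmf_tcp_opts_init() and the per-qpair
 * resource layout: the reqs/bufs/pdus arrays, 1-based ttags, a 4 KiB
 * buffer stride, and the positions of mgmt_pdu and pdu_in_progress.
 */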
static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

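/*
 * The amount of offending-header data copied into a C2H termination request
 * is capped at SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE; both cases below
 * check that plen equals hlen plus the copied length.
 */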
static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

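/*
 * An ICReq with an unsupported PFV or an out-of-range HPDA quiesces the
 * qpair; a valid ICReq yields an ICResp whose cpda mirrors the requested
 * hpda and whose maxh2cdata is taken from the transport's max_io_size.
 */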
static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: the received PFV differs from the expected value (0). Expect: fail */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: the received HPDA is outside the valid range 0-31. Expect: fail */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

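/*
 * A command whose opcode implies a bidirectional transfer is rejected by
 * the transport; the request should complete with INVALID_OPCODE without
 * ever being queued for a data buffer.
 */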
static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it needs an SGL data buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* The SGL itself is well-formed; the bidirectional transfer type is what should fail */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process the command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

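/*
 * A transport SGL whose length exceeds max_io_size is a fatal protocol
 * error: the request parks in NEED_BUFFER, but the qpair quiesces and a
 * C2H termination request is prepared.
 */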
static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it needs an SGL data buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up the SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process the command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

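/*
 * Common-header validation cases. On each failure the qpair quiesces and a
 * C2H termination request is built; fei[0] holds the byte offset of the
 * offending common-header field (2 = hlen, 3 = pdo, 4 = plen).
 */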
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received an ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected and received PDU header lengths differ. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not yet negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters are valid. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

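/*
 * The remaining tests cover TLS PSK support: registering and removing a
 * host's PSK from an interchange-format file, generating the PSK identity
 * string, and deriving retained and TLS PSKs.
 */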
static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check that we can generate the expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit the PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with an unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

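/*
 * PSK derivation happens in two steps: the configured PSK is first turned
 * into a retained PSK (bound to the host NQN and a hash function), from
 * which the final TLS PSK is derived (bound to the PSK identity and cipher
 * suite). The SHA-384 retained PSK is longer than the SHA-256 one, which
 * the length assertions below rely on.
 */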
static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different for different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different for different hashes and the same input PSK. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown value as the hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer of insufficient size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown value as the cipher suite errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer of insufficient size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}