/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Any length of one io_unit_size or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success; io_unit_size gets capped at max_io_size */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Init tcp_req2, which sits in the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the PDU so that it requires an SGL buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this req takes precedence over the next req. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule cmd; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disable */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: Expected ICReq PFV 0 but received a different value. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: Expected ICReq HPDA in range 0-31 but received a value out of range. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the PDU so that it requires an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Need to set to a non-zero value to check it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the PDU so that it requires an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Need to set to a non-zero value to check it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}
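
/*
 * The cases below exercise nvmf_tcp_pdu_ch_handle(), which validates the common
 * PDU header (pdu_type, hlen, plen and pdo) before the PSH is processed. As the
 * assertions show, any violation quiesces the qpair and builds a C2H termination
 * request in tqpair->mgmt_pdu, while a valid header advances the receive state
 * to AWAIT_PDU_PSH.
 */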
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICReq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters are valid. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing the PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate the expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit the PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with an unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hashes and the same input PSK. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown hash value makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer of insufficient size makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown cipher suite makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer of insufficient size makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}