/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));

DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_default_impl_name, const char *, (void), "");
DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Cast official trstring to uppercase version of input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}
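
/*
 * Local override rather than a DEFINE_STUB: the real spdk_nvmf_request_get_buffers()
 * allocates from the transport's shared buffer pool.  Here we hand back a single
 * dummy iovec and reject any length of one io_unit or more, so the tests below can
 * drive both the success and the failure paths of SGL handling without a real pool.
 */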
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length of one io_unit or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}
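
/*
 * test_nvmf_tcp_create/test_nvmf_tcp_destroy: create the TCP transport with valid
 * options, with an io_unit_size larger than max_io_size (the test expects it to be
 * reported back as max_io_size), and with an io_unit_size that is too small to be
 * usable (creation is expected to fail), then tear the transport down again.
 */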
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
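
/*
 * test_nvmf_tcp_send_c2h_data: build a 3-iovec, 300-byte request and verify that the
 * C2H DATA PDU carries the same iovecs and the LAST_PDU flag, and that the SUCCESS
 * flag is only set when c2h_success is enabled and the completion cdw0 is zero.
 */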
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Init an idle tcp_req and put it on the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this req takes precedence over the next one. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair's capsule cmd header; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disable */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}
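
/*
 * test_nvmf_tcp_icreq_handle: feed nvmf_tcp_icreq_handle() an ICReq with an
 * unsupported PFV, then one with an out-of-range HPDA (both must quiesce the
 * qpair), and finally a valid ICReq; check the ICResp fields (CPDA, digests,
 * maxh2cdata) and the resulting receive state.
 */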
static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: ICReq PFV is not the expected 0. Expect: fail */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: ICReq HPDA is outside the allowed range 0-31. Expect: fail */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Needs to be a non-zero value so we can check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Needs to be a non-zero value so we can check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}
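
/*
 * test_nvmf_tcp_pdu_ch_handle: walk nvmf_tcp_pdu_ch_handle() through the common
 * header validation failures (duplicate ICReq, bad hlen, ICResp on the target side,
 * unexpected PDU type, bad plen, bad pdo) and confirm each one quiesces the qpair
 * and builds a C2H TERM REQ with the expected lengths and FEI, then finish with a
 * well-formed ICReq that advances the qpair to PSH processing.
 */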
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to the function. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}
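
/*
 * TLS/PSK coverage starts here.  test_nvmf_tcp_tls_add_remove_credentials writes a
 * PSK in interchange format to a scratch file, registers it for a host/subsystem
 * pair via nvmf_tcp_subsystem_add_host(), verifies the entry shows up on the
 * transport's PSK list, and then removes it again.  The path /tmp/psk.txt is only a
 * temporary file that the test itself creates and deletes.
 */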
static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hash and the same input PSKs. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing unknown value as hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing buffer insufficient in size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing unknown value as hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing buffer insufficient in size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}