/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

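/*
 * The stubs below satisfy dependencies of ctrlr.c and tcp.c, which are compiled
 * directly into this unit test, so that code outside the TCP transport does not
 * need to be linked in.
 */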
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	int,
	(struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	int,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	struct spdk_nvmf_ctrlr *,
	(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	struct spdk_nvmf_subsystem *,
	(struct spdk_nvmf_tgt *tgt, const char *subnqn),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	bool,
	(struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	struct spdk_nvmf_subsystem_listener *,
	(struct spdk_nvmf_subsystem *subsystem,
	 const struct spdk_nvme_transport_id *trid),
	(void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	 uint32_t iovcnt, uint64_t offset, uint32_t length,
	 struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	struct spdk_nvmf_ns *,
	(struct spdk_nvmf_subsystem *subsystem),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	struct spdk_nvmf_ns *,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	bool,
	(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	bool,
	(struct spdk_nvmf_ctrlr *ctrlr),
	false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	bool,
	(struct spdk_nvmf_ctrlr *ctrlr),
	false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	bool,
	(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	false);

DEFINE_STUB(nvmf_transport_req_complete,
	int,
	(struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	bool,
	(struct spdk_bdev *bdev),
	false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	(struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	 struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	int,
	(struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	void *,
	(struct spdk_sock_group *group),
	NULL);

DEFINE_STUB(spdk_sock_set_priority,
	int,
	(struct spdk_sock *sock, int priority),
	0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
	enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	int,
	(struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	int,
	(struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	 uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	 spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Set the official trstring to an uppercased copy of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

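/*
 * Test double for spdk_nvmf_request_get_buffers(): it hands back a single fake
 * iov and rejects any length of one io_unit_size or more, which lets the
 * SGL-parsing paths be exercised without a real buffer pool.
 */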
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Any length of one io_unit_size or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

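/*
 * nvmf_tcp_create(): valid opts succeed (case 1), an io_unit_size larger than
 * max_io_size still succeeds (case 2), and a too-small io_unit_size fails (case 3).
 */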
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

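/* nvmf_tcp_destroy() should tear down a freshly created transport and return 0. */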
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

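/*
 * nvmf_tcp_poll_group_create() should build a poll group on a mocked sock group,
 * including the control message list used for in-capsule data responses.
 */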
static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

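/*
 * nvmf_tcp_send_c2h_data() should copy the request iovs into the C2H DATA PDU
 * and only set the SUCCESS flag when c2h_success is enabled and cdw0 is 0.
 */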
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

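/*
 * nvmf_tcp_h2c_data_hdr_handle() should match the H2C DATA header (cccid/ttag)
 * to the outstanding request and map the payload onto the request iovs.
 */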
static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

	CU_ASSERT(TAILQ_FIRST(&tqpair.tcp_req_working_queue) == &tcp_req);
	TAILQ_REMOVE(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
}

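/*
 * A fabrics CONNECT capsule whose in-capsule data fits within an io unit should
 * be accepted while an earlier request is still waiting in pending_buf_queue.
 */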
static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Put an empty tcp_req into the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu; make the pdu require an SGL data buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; it takes precedence over the next req. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule cmd; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

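/*
 * nvmf_tcp_opts_init() should apply the documented defaults, and
 * nvmf_tcp_qpair_init_mem_resource() should lay out reqs, bufs and PDUs
 * (including the management and in-progress PDUs) for the default queue depth.
 */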
static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress == &tqpair->pdus[SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH + 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of the tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

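/*
 * nvmf_tcp_send_c2h_term_req() should build a C2H TERM REQ whose copied header
 * data is capped at SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE.
 */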
static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.sgl.total_size = 0;
	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

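/*
 * nvmf_tcp_send_capsule_resp_pdu() should fill the CAPSULE_RESP header and
 * account for the header digest only when host hdgst is enabled.
 */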
static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

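/*
 * nvmf_tcp_icreq_handle() should reject an ICReq with an unexpected PFV and
 * otherwise answer with an ICResp that mirrors the negotiated parameters.
 */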
static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case 1: The expected ICReq PFV is 0, but a different value was received. Expect: fail */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 2: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

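/*
 * A command with an invalid (bidirectional) transfer type should complete with
 * SPDK_NVME_SC_INVALID_OPCODE without being queued for data buffers.
 */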
static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu; make the pdu require an SGL data buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set the cid to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

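/*
 * A write whose SGL length exceeds max_io_size should fail with
 * SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID and go straight to a response.
 */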
static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu; make the pdu require an SGL data buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set the cid to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

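/*
 * nvmf_tcp_pdu_ch_handle(): each malformed common-header case (wrong state,
 * bad hlen/plen/pdo, unexpected PDU type) should trigger a C2H TERM REQ, and a
 * well-formed ICReq should advance the qpair to PSH handling.
 */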
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received length differ. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to the function. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}