/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

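/*
 * Unlike the DEFINE_STUB() shims above, this override does real work: it
 * returns an I/O channel for the fake accel io_device (g_accel_p) that
 * init_accel() registers below, so code under test that calls
 * spdk_accel_get_io_channel() gets a usable channel on the test thread.
 */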
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Any length of one io unit or more fails to get buffers. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

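/*
 * Lightweight stand-in for the bdev-layer identify helper: it only fills in
 * the namespace size fields from the bdev's block count and reports a single
 * 512-byte LBA format, which is all the TCP transport tests need.
 */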
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

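/*
 * Exercises nvmf_tcp_send_c2h_data(): builds a C2H DATA PDU for a request
 * with a three-element iovec and verifies the header fields, the iovec
 * mapping, and the LAST_PDU/SUCCESS flags, including how the SUCCESS
 * optimization is dropped when cdw0 is non-zero or c2h_success is disabled.
 */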
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}


static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init a null tcp_req into tqpair TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this req takes precedence over the next one. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule cmd; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of tqpair resource */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.sgl.total_size = 0;
	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disable */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case 1: Expected ICReq PFV is 0, but the received PFV differs. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 2: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Need to set to a non-zero value to check it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Need to set to a non-zero value to check it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

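/*
 * Drives nvmf_tcp_pdu_ch_handle() through a series of malformed common
 * headers (duplicate ICReq, mismatched hlen, bad plen, bad pdo, unexpected
 * PDU type) and checks that each one produces a C2H TERM_REQ with the
 * expected header sizes and FEI value, then finishes with a valid ICReq.
 */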
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to the function. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}