/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"
#include "spdk/bdev_zone.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* cast official trstring to uppercase version of input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length of one io_unit or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}


void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

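/*
 * Descriptive note (not in the original file): test_nvmf_tcp_create exercises
 * nvmf_tcp_create()/nvmf_tcp_destroy() with three opts sets: a fully valid set
 * (case 1), a set whose io_unit_size exceeds max_io_size, which the asserts in
 * case 2 expect to be reduced to UT_MAX_IO_SIZE, and a set with a too-small
 * io_unit_size for which transport creation is expected to fail (case 3).
 */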
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

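/*
 * Descriptive note (not in the original file): test_nvmf_tcp_send_c2h_data
 * builds a request with a 3-element iovec and checks that
 * nvmf_tcp_send_c2h_data() fills in the C2H_DATA header (datao, datal, plen,
 * flags) and mirrors the request iovec into the PDU, and that the SUCCESS
 * flag is only set while c2h_success is enabled and cdw0 is zero.
 */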
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

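/*
 * Descriptive note (not in the original file): test_nvmf_tcp_h2c_data_hdr_handle
 * submits an H2C_DATA header whose cccid/ttag/datao/datal match an outstanding
 * request and checks that nvmf_tcp_h2c_data_hdr_handle() maps the PDU data
 * onto the request's iovec.
 */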
static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}


static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req2, which sits on the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu so that it needs an SGL buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* insert tcp_req1 into pending_buf_queue; this req takes precedence over the next req. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* process the tqpair capsule cmd; the req still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of tqpair resource */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

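/*
 * Descriptive note (not in the original file): test_nvmf_tcp_send_c2h_term_req
 * verifies that nvmf_tcp_send_c2h_term_req() quiesces the qpair and builds a
 * C2H_TERM_REQ PDU whose plen includes the offending header, capped at
 * SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE.
 */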
static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

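/*
 * Descriptive note (not in the original file): test_nvmf_tcp_send_capsule_resp_pdu
 * checks the CAPSULE_RESP PDU built by nvmf_tcp_send_capsule_resp_pdu(), with
 * and without header digest, including the copied completion entry and the
 * completion callback/argument.
 */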
static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disable */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

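/*
 * Descriptive note (not in the original file): test_nvmf_tcp_icreq_handle feeds
 * nvmf_tcp_icreq_handle() an ICReq with a bad PFV, then a bad HPDA, expecting
 * the qpair to quiesce in both cases, and finally a valid ICReq, checking the
 * ICResp fields and the resulting recv state.
 */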
static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: Expected ICReq PFV is 0, but the received PFV differs. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: Expected ICReq HPDA within 0-31, but the received HPDA is out of range. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu so that it needs an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Need to set to a non-zero value to check it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

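/*
 * Descriptive note (not in the original file): test_nvmf_tcp_invalid_sgl
 * submits a WRITE capsule whose SGL length exceeds max_io_size and checks that
 * nvmf_tcp_req_process() terminates the connection with a C2H_TERM_REQ while
 * the request remains in the NEED_BUFFER state.
 */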
static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu so that it needs an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Need to set to a non-zero value to check it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

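/*
 * Descriptive note (not in the original file): test_nvmf_tcp_pdu_ch_handle
 * drives nvmf_tcp_pdu_ch_handle() through a series of malformed common headers
 * (duplicate ICReq, bad hlen, unexpected PDU types, bad plen, bad pdo) and
 * checks the resulting C2H_TERM_REQ and its fei field, then finishes with a
 * well-formed ICReq that should advance the recv state to AWAIT_PDU_PSH.
 */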
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to what the function expects. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}