/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	int,
	(struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	int,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	struct spdk_nvmf_ctrlr *,
	(struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	struct spdk_nvmf_subsystem *,
	(struct spdk_nvmf_tgt *tgt, const char *subnqn),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	bool,
	(struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	struct spdk_nvmf_subsystem_listener *,
	(struct spdk_nvmf_subsystem *subsystem,
	 const struct spdk_nvme_transport_id *trid),
	(void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	(struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	 uint32_t iovcnt, uint64_t offset, uint32_t length,
	 struct spdk_nvme_transport_id *cmd_src_trid));
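
/*
 * Note: the DEFINE_STUB()/DEFINE_STUB_V() macros used throughout this file come from
 * spdk_internal/mock.h. Roughly speaking, each one emits a canned definition of the
 * named function that simply returns the supplied value, which lets ctrlr.c and tcp.c
 * (included above as sources) link into this test binary without pulling in the rest
 * of the nvmf, bdev and sock layers.
 */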

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	struct spdk_nvmf_ns *,
	(struct spdk_nvmf_subsystem *subsystem),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	struct spdk_nvmf_ns *,
	(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	bool,
	(struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	bool,
	(struct spdk_nvmf_ctrlr *ctrlr),
	false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	bool,
	(struct spdk_nvmf_ctrlr *ctrlr),
	false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	bool,
	(struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	false);

DEFINE_STUB(nvmf_transport_req_complete,
	int,
	(struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	bool,
	(struct spdk_bdev *bdev),
	false);

DEFINE_STUB(nvmf_bdev_ctrlr_start_zcopy,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	 struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(nvmf_bdev_ctrlr_end_zcopy,
	int,
	(struct spdk_nvmf_request *req, bool commit),
	0);

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	(struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	 struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	int,
	(struct spdk_sock *sock, struct spdk_sock_group **group),
	0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	void *,
	(struct spdk_sock_group *group),
	NULL);

DEFINE_STUB(spdk_sock_set_priority,
	int,
	(struct spdk_sock *sock, int priority),
	0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	(struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	int,
	(struct spdk_nvmf_request *req),
	0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	int,
	(struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	 uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	int,
	(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	 spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as an uppercase copy of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}
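
/*
 * Unlike the stubs above, spdk_nvmf_request_get_buffers() is reimplemented here so the
 * tests can control buffer allocation: it fakes a single-iovec allocation for short
 * requests and reports failure for anything of io_unit_size or larger, which the
 * in-capsule data test further down appears to rely on to keep a request parked on
 * pending_buf_queue.
 */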
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length of one io_unit_size or more is expected to fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
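
/*
 * test_nvmf_tcp_send_c2h_data: builds a C2H DATA PDU from the request's iovecs and
 * checks the header fields (datao/datal/plen) and the mapped data iovecs, then
 * verifies that the SUCCESS flag is set only when c2h_success is enabled and the
 * completion cdw0 is 0.
 */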
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
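
/*
 * test_nvmf_tcp_h2c_data_hdr_handle: feeds an H2C DATA header that matches a request
 * already in TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER (same cccid/ttag and a
 * datao/datal range covering the whole request) and checks that the PDU's data iovecs
 * are mapped onto the request's buffers.
 */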

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue,
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

	CU_ASSERT(TAILQ_FIRST(&tqpair.tcp_req_working_queue) ==
		  &tcp_req);
	TAILQ_REMOVE(&tqpair.tcp_req_working_queue,
		     &tcp_req, state_link);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req2, which sits on the qpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu, make the pdu require an SGL data buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Process tcp_req1: it is queued on pending_buf_queue and takes precedence over the next request. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the qpair's capsule command; tcp_req1 must still remain on pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress == &tqpair->pdus[SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH + 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of the tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.sgl.total_size = 0;
	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}
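
/*
 * test_nvmf_tcp_send_capsule_resp_pdu: checks the CAPSULE RESP PDU built for a request,
 * with and without the header digest; with hdgst enabled, plen grows by
 * SPDK_NVME_TCP_DIGEST_LEN and the HDGSTF flag is set.
 */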
static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case 1: the PFV in the received ICReq differs from the expected value 0. Expect: error */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 2: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu, make the pdu require an SGL data buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set the cid to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up the SGL descriptor; the failure should come from the invalid (bidirectional) transfer type */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process the command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init the pdu, make the pdu require an SGL data buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set the cid to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up the SGL to ensure nvmf_tcp_req_parse_sgl returns an error: the length exceeds max_io_size */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process the command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}
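
/* Register all of the TCP transport unit tests with CUnit and run them. */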
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}