/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

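/* The stubs below cover the subsystem namespace iterators and the bdev
 * command entry points. The TCP transport tests in this file never reach
 * a real bdev, so returning fixed values is sufficient.
 */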
DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

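/* Qpair hand-off, request lifecycle, trace printing, and accel-framework
 * entry points are likewise stubbed out below.
 */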
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_accel_engine_get_io_channel,
	    struct spdk_io_channel *,
	    (void),
	    NULL);

DEFINE_STUB(spdk_accel_submit_crc32c,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, void *src, uint32_t seed,
	     uint64_t nbytes, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

struct spdk_trace_histories *g_trace_histories;

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

void
spdk_trace_register_object(uint8_t type, char id_prefix)
{
}

void
spdk_trace_register_description(const char *name,
				uint16_t tpoint_id, uint8_t owner_type,
				uint8_t object_type, uint8_t new_object,
				uint8_t arg1_type, const char *arg1_name)
{
}

void
_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		   uint32_t size, uint64_t object_id, uint64_t arg1)
{
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}
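
/* Simplified buffer-allocation mock: requests of one io_unit_size or more
 * are rejected, which lets tests exercise the pending_buf_queue path, while
 * successful allocations get a single fake iovec.
 */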
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length of one io_unit_size or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

void
spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
{
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1: valid options */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* case 2: io_unit_size larger than max_io_size; create clamps it to max_io_size */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* case 3: io_unit_size too small relative to max_io_size (would require too many SGL entries) */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
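
/* Create a transport with valid options, then verify that nvmf_tcp_destroy()
 * tears it down and returns 0.
 */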
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
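
/* Build a 300-byte controller-to-host transfer split across three iovecs and
 * verify that nvmf_tcp_send_c2h_data() maps the offsets, lengths, and iovecs
 * 1:1 into the C2H DATA PDU.
 */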
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue,
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

	CU_ASSERT(TAILQ_FIRST(&tqpair.tcp_req_working_queue) ==
		  &tcp_req);
	TAILQ_REMOVE(&tqpair.tcp_req_working_queue,
		     &tcp_req, state_link);
}
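
/* Exercise in-capsule data handling: tcp_req1 is parked in pending_buf_queue
 * first, then a new capsule command arrives. The handler must bind the new
 * command to tcp_req2 from the free queue and leave tcp_req1 untouched at the
 * head of pending_buf_queue.
 */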
static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu;
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Initialize an idle tcp_req on the qpair's TCP_REQUEST_STATE_FREE queue. */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* Initialize tcp_req1. */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the in-progress PDU so that it needs an SGL buffer. */
	pdu = &tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; it takes precedence over any later request. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the qpair's capsule command; tcp_req1 must remain in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
}
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}