/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));
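/*
 * Note: DEFINE_STUB(fn, ret, args, default) and DEFINE_STUB_V() come from
 * spdk_internal/mock.h. Each one emits a real definition of fn whose return
 * value is a per-function mock variable initialized to the given default;
 * roughly (a sketch of the idea, not the exact macro expansion):
 *
 *	ret ut_fn = default;
 *	ret fn(args) { return MOCK_GET(fn); }
 *
 * A test can override the value at runtime with MOCK_SET(fn, val) and restore
 * the default with MOCK_CLEAR(fn) (MOCK_CLEAR_P(fn) for pointer returns), as
 * test_nvmf_tcp_poll_group_create() does below for spdk_sock_group_create.
 */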
DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

struct spdk_trace_histories *g_trace_histories;

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

void
spdk_trace_register_object(uint8_t type, char id_prefix)
{
}

void
spdk_trace_register_description(const char *name,
				uint16_t tpoint_id, uint8_t owner_type,
				uint8_t object_type, uint8_t new_object,
				uint8_t arg1_type, const char *arg1_name)
{
}

void
_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		   uint32_t size, uint64_t object_id, uint64_t arg1)
{
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Lengths of one io unit or more fail to get a buffer. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}
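/*
 * With the stub above, a request shorter than io_unit_size is granted a single
 * fake buffer at 0xDEADBEEF, while a request of io_unit_size or more is
 * rejected with -EINVAL as if no buffer were available.
 * test_nvmf_tcp_incapsule_data_handle() below leans on exactly this boundary:
 * an SGL of UT_IO_UNIT_SIZE leaves its request waiting on pending_buf_queue,
 * while the follow-up capsule with an SGL of UT_IO_UNIT_SIZE - 1 can proceed
 * to payload reception without queueing.
 */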
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

void
spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
{
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success: io_unit_size is capped at max_io_size */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure: io_unit_size is too small */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
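/*
 * Cases 1 and 2 tear the transport down by hand, freeing the shared data
 * buffer pool and the ttransport allocation directly, rather than calling
 * nvmf_tcp_destroy(); the destroy path itself is exercised separately in
 * test_nvmf_tcp_destroy() below.
 */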
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
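/*
 * The MOCK_SET()/MOCK_CLEAR_P() pair above makes the stubbed
 * spdk_sock_group_create() hand back the local sock group for the duration of
 * poll group creation. The control_msg_list check is guarded because it only
 * applies when the configured in-capsule data size is below
 * SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE, the case in which poll group
 * creation is expected to allocate its list of small control-message buffers.
 */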
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;

	tqpair.qpair.transport = &ttransport.transport;
	TAILQ_INIT(&tqpair.send_queue);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

	CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
		  &tcp_req);
	TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
		     &tcp_req, state_link);
}
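/*
 * The handler tested above matches the H2C DATA header against an outstanding
 * request: cccid must equal the command identifier of a request currently in
 * TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER, and ttag must match the
 * transfer tag handed to the host in the corresponding R2T. On a match it
 * maps the request's iovecs onto the PDU (checked via pdu.data_iov[] above)
 * so the incoming payload lands directly in the request buffers.
 */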
static void
test_nvmf_tcp_incapsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu;
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};
	int i = 0;

	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	/* Init tqpair; the pdu in pdu_in_progress will wait for a buffer. */
	for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
		TAILQ_INIT(&tqpair.state_queue[i]);
	}

	TAILQ_INIT(&tqpair.send_queue);

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Init tcp_req2 as an empty req on the tqpair TCP_REQUEST_STATE_FREE queue. */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* Init tcp_req1. */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it needs an SGL buffer. */
	pdu = &tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; it takes precedence over later reqs. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule req; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
}
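/*
 * nvmf_tcp_capsule_cmd_hdr_handle() pairs the in-progress PDU with a request
 * taken from the TCP_REQUEST_STATE_FREE queue (tcp_req2 here) and, because
 * the command carries its data inside the capsule itself, advances the qpair
 * to NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD. The asserts above pin down
 * both effects, and also confirm that the still-unsatisfied tcp_req1 keeps
 * its place at the head of pending_buf_queue.
 */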
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_incapsule_data_handle);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}