/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);
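
/*
 * The accel framework is stubbed out below. spdk_accel_get_io_channel() is
 * overridden to return a channel for the locally registered g_accel_p
 * io_device (see init_accel()), so poll group creation can obtain an accel
 * channel without pulling in the real accel library.
 */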
DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Set the official trstring to the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Requests of one io unit or larger will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}


void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
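
/*
 * test_nvmf_tcp_send_c2h_data() builds a 300-byte request spread over three
 * iovecs and checks that nvmf_tcp_send_c2h_data() fills in the C2H DATA PDU
 * header (datao/datal/plen/flags) and the data iovecs, and that the SUCCESS
 * optimization flag is only set when c2h_success is enabled and cdw0 == 0.
 */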
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}


static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Put an unused tcp_req on the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it needs an SGL buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this req takes precedence over the next req. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule cmd; the previous req still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}
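
/*
 * test_nvmf_tcp_send_capsule_resp_pdu() verifies that the CAPSULE_RESP PDU is
 * built with the expected header fields, that the completion is copied into
 * rccqe, and that the completion callback is set, both with the header digest
 * enabled (plen includes the digest) and disabled.
 */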
static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: received ICReq PFV differs from the expected value (0). */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: received ICReq HPDA is outside the expected range 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it needs an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Need to set a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it needs an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Need to set a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}
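
/*
 * test_nvmf_tcp_pdu_ch_handle() walks the common-header validation paths:
 * a duplicate ICreq, a bad hlen, PDU types that are invalid for the current
 * qpair state, and bad plen/pdo values. Each error case should queue a C2H
 * TERM REQ with the matching fes/fei, while a well-formed ICreq advances the
 * qpair to NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH.
 */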
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected and received PDU header lengths differ. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters are valid. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}
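
/*
 * test_nvmf_tcp_tls_add_remove_credentials() writes a PSK in interchange
 * format to a temporary file, registers it for a host/subsystem pair via
 * nvmf_tcp_subsystem_add_host(), verifies that the entry appears on the
 * transport's PSK list, then removes it and verifies that the list is
 * cleaned up.
 */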
static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing the PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate the expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit the PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with an unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hash and the same input PSKs. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown value as the hash makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer that is too small makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}
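
/*
 * test_nvmf_tcp_tls_generate_tls_psk() derives a retained PSK from a
 * configured PSK and checks that nvme_tcp_derive_tls_psk() produces distinct
 * TLS PSKs for different cipher suites, and fails for an unknown cipher suite
 * or an undersized output buffer.
 */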
static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown value as the cipher suite makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer that is too small makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}