/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Cast the official trstring to an uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Requests of one io unit or larger will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
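	/* Destroying the poll group does not release the transport; tear it down explicitly below. */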
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* Add an idle tcp_req to the tqpair's TCP_REQUEST_STATE_FREE queue. */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it requires an SGL buffer. */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this request takes precedence over the next one. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule command; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

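	/* First pass: wire the request to its qpair and pdu with header digest enabled. */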
	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disable */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: Received ICReq PFV differs from the expected value of 0. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: Received ICReq HPDA is outside the expected range of 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it requires an SGL buffer. */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set cid to a non-zero value to check that it gets copied to the response. */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the pdu so that it requires an SGL buffer. */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set cid to a non-zero value to check that it gets copied to the response. */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

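/*
 * The cases below exercise nvmf_tcp_pdu_ch_handle(): each one corrupts a single
 * common-header field of the in-progress PDU (pdu_type, hlen, plen or pdo) and
 * verifies that the qpair quiesces and a C2H termination request is built with
 * the expected lengths and, where checked, an FEI pointing at the bad field.
 */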
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to the function. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hash and the same input PSKs. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing unknown value as hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing buffer insufficient in size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing unknown value as hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing buffer insufficient in size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}