/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);
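/*
 * The DEFINE_STUB entries above and below replace the subsystem, controller
 * and bdev-layer helpers that ctrlr.c and tcp.c normally call, so the TCP
 * transport code under test can be linked and exercised without a real
 * target stack behind it.
 */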
DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB_V(nvmf_bdev_ctrlr_identify_iocs_nvm,
	      (struct spdk_nvmf_ns *ns, struct spdk_nvme_nvm_ns_data *nsdata_nvm));

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
DEFINE_STUB(nvmf_request_get_buffers_abort, bool, (struct spdk_nvmf_request *r), false);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}
int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length equal to or larger than one io unit will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	MOCK_SET(spdk_sock_group_create, &grp);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	MOCK_CLEAR_P(spdk_sock_group_create);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
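/*
 * test_nvmf_tcp_destroy: create a transport with valid options and verify
 * that nvmf_tcp_destroy() tears it down and returns 0.
 */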
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}
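/*
 * test_nvmf_tcp_poll_group_create: create a transport and a poll group for it
 * and verify the group can be created and destroyed; when the in-capsule data
 * size is below SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE the group must also
 * have allocated its control message list.
 */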
static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
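/*
 * test_nvmf_tcp_h2c_data_hdr_handle: build an H2C data header whose cccid,
 * ttag, datao and datal match an outstanding request in the
 * TRANSFERRING_HOST_TO_CONTROLLER state and verify that the handler maps the
 * incoming payload onto that request's iovecs.
 */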
#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}
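/*
 * test_nvmf_tcp_in_capsule_data_handle: exercise the path where a new request
 * cannot get a data buffer right away. tcp_req1 is left waiting for iobuf
 * buffers, a second capsule is matched to tcp_req2 from the free queue, and
 * once the get-buffers-done callback fires, tcp_req1 is expected to move to
 * TRANSFERRING_HOST_TO_CONTROLLER.
 */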
static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_transport_ops ops = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
	ttransport.transport.ops = &ops;
	ops.req_get_buffers_done = nvmf_tcp_req_get_buffers_done;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req2, the idle request sitting on the tqpair TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;
	tcp_req1.req.data_from_pool = false;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* pretend that tcp_req1 is waiting in the iobuf waiting queue */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(tcp_req1.req.data_from_pool == false);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* process tqpair capsule req. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);

	/* pretend that a buffer for tcp_req1 becomes available */
	spdk_nvmf_request_get_buffers(&tcp_req1.req, group, &ttransport.transport, UT_IO_UNIT_SIZE - 1);
	/* trigger the callback as nvmf_request_iobuf_get_cb would */
	ttransport.transport.ops->req_get_buffers_done(&tcp_req1.req);
	CU_ASSERT(tcp_req1.state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
}
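/*
 * test_nvmf_tcp_qpair_init_mem_resource: check the transport option defaults
 * set by nvmf_tcp_opts_init() and verify that qpair initialization allocates
 * the reqs/bufs/pdus arrays and wires up the ttag, pdu, buffer and cmd/rsp
 * pointers for every request slot.
 */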
static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
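/*
 * The next two tests cover PDUs the target generates on its own: a C2H
 * terminate request, whose copied error data is capped at
 * SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, and a capsule response built
 * with and without the header digest flag.
 */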
static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* header digest disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}
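/*
 * test_nvmf_tcp_icreq_handle: reject ICReq PDUs with an unsupported PFV or an
 * out-of-range HPDA, and for a valid ICReq verify the ICResp fields (pfv,
 * cpda, maxh2cdata, digest bits) and the transition to AWAIT_PDU_READY.
 */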
static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: received ICReq PFV differs from the expected value 0. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: received ICReq HPDA is outside the expected range 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}
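/*
 * The next two tests feed malformed capsule commands through
 * nvmf_tcp_req_process(): one with an unsupported (bidirectional) transfer
 * type, which must complete with SPDK_NVME_SC_INVALID_OPCODE, and one with an
 * SGL longer than max_io_size, which must produce a C2H terminate request.
 */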
static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set the cid to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set the cid to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}
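/*
 * test_nvmf_tcp_pdu_ch_handle: walk nvmf_tcp_pdu_ch_handle() through the
 * common-header error cases (duplicate ICReq, bad hlen, un-negotiated
 * connection, unexpected PDU type, bad plen, bad pdo) and one valid ICReq,
 * checking the generated C2H terminate request or the PSH-await transition.
 */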
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);
	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to what the function expects. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}
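/*
 * test_nvmf_tcp_tls_add_remove_credentials: write a PSK in interchange format
 * to a temporary file, register it for a host/subsystem pair via
 * nvmf_tcp_subsystem_add_host(), confirm the entry shows up on the
 * transport's PSK list, then remove the host and confirm the entry is gone.
 */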
static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	struct spdk_sock_group grp = {};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
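/*
 * test_nvmf_tcp_tls_generate_psk_id: generate the TLS PSK identity string and
 * compare it against a known reference; undersized output buffers and unknown
 * cipher suites must be rejected.
 */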
static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}
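/*
 * test_nvmf_tcp_tls_generate_retained_psk: derive retained PSKs and check
 * that they change with the input PSK and with the hash algorithm, and that
 * unknown hashes or undersized output buffers are rejected.
 */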
static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hash and the same input PSKs. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown value as the hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer that is too small errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}
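/*
 * test_nvmf_tcp_tls_generate_tls_psk: derive the final TLS PSK from a
 * retained PSK and check that different cipher suites yield different keys,
 * and that unknown cipher suites or undersized output buffers are rejected.
 */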
static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown value as the hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer that is too small errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}