/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
DEFINE_STUB(nvmf_request_get_buffers_abort, bool, (struct spdk_nvmf_request *r), false);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* The official trstring is the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length equal to or larger than one io unit will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}


void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	MOCK_SET(spdk_sock_group_create, &grp);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	MOCK_CLEAR_P(spdk_sock_group_create);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}


static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_transport_ops ops = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
	ttransport.transport.ops = &ops;
	ops.req_get_buffers_done = nvmf_tcp_req_get_buffers_done;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req2 and place it on the tqpair TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;
	tcp_req1.req.data_from_pool = false;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* pretend that tcp_req1 is waiting in the iobuf waiting queue */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(tcp_req1.req.data_from_pool == false);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* process tqpair capsule req. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);

	/* pretend that buffer for tcp_req1 becomes available */
	spdk_nvmf_request_get_buffers(&tcp_req1.req, group, &ttransport.transport, UT_IO_UNIT_SIZE - 1);
	/* trigger callback as nvmf_request_iobuf_get_cb would */
	ttransport.transport.ops->req_get_buffers_done(&tcp_req1.req);
	CU_ASSERT(tcp_req1.state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: The received ICReq PFV differs from the expected value of 0. */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: The received ICReq HPDA is outside the valid range of 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters are valid. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	struct spdk_sock_group grp = {};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hash and the same input PSKs. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown hash value makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer that is too small makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained), NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown hash value makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer that is too small makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}