/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);
DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercase version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* A length of one I/O unit size or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	MOCK_SET(spdk_sock_group_create, &grp);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	MOCK_CLEAR_P(spdk_sock_group_create);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)
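
/*
 * Note: the test below exercises nvmf_tcp_h2c_data_hdr_handle() directly. It builds an
 * H2C data header whose command id (cccid) and transfer tag (ttag) match the single
 * outstanding request and then checks that the request's iovecs are attached to the
 * receiving PDU unchanged.
 */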

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* Initialize tcp_req2, which sits on the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it requires an SGL buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; this request takes precedence over the next one. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair capsule command; tcp_req1 still remains in pending_buf_queue. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	SPDK_CU_ASSERT_FATAL(tqpair != NULL);
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of the tqpair resources */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: The received ICReq PFV differs from the expected value (0). */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: The received ICReq HPDA is outside the expected range 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it requires an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it requires an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(!STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

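/*
 * Note: the test below walks nvmf_tcp_pdu_ch_handle() through a series of malformed
 * common headers (an ICReq received twice, a bad hlen, a PDU type that is not valid in
 * the current qpair state, and bad plen/pdo values) and checks that each case quiesces
 * the qpair and prepares a C2H term-req with the expected error field, while a
 * well-formed ICReq advances the receive state to PSH handling.
 */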
static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: The received PDU header length differs from the expected one. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters are valid. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	struct spdk_sock_group grp = {};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	SPDK_CU_ASSERT_FATAL(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check that we can generate the expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hashes and the same input PSK. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown hash value causes the function to fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer that is too small causes the function to fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained), NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	SPDK_CU_ASSERT_FATAL(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown cipher suite causes the function to fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer that is too small causes the function to fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}