/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_sock_get_numa_id, int32_t, (struct spdk_sock *sock), 0);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);
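
/*
 * Stubs for the bdev-layer NVMe command handlers invoked from ctrlr.c.
 * Each one simply returns 0 so that request processing in tcp.c can be
 * exercised without a functional bdev underneath.
 */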
DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB_V(nvmf_bdev_ctrlr_identify_iocs_nvm,
	      (struct spdk_nvmf_ns *ns, struct spdk_nvme_nvm_ns_data *nsdata_nvm));

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_sock_group_register_interrupt, int, (struct spdk_sock_group *group,
		uint32_t events, spdk_interrupt_fn fn, void *arg, const char *name), 0);
DEFINE_STUB_V(spdk_sock_group_unregister_interrupt, (struct spdk_sock_group *group));

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
DEFINE_STUB(nvmf_request_get_buffers_abort, bool, (struct spdk_nvmf_request *r), false);
DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);
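
/*
 * Unlike the stubs above, spdk_accel_get_io_channel() gets a real
 * replacement: it hands out an I/O channel for the fake accel io_device
 * (g_accel_p) that init_accel() registers further below.
 */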
struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Requests of one io_unit_size or more will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}
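
/*
 * Verify nvmf_tcp_create(): creation succeeds with valid options, an
 * io_unit_size larger than max_io_size is capped to max_io_size, and a
 * tiny io_unit_size (16 bytes) causes creation to fail.
 */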
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	MOCK_SET(spdk_sock_group_create, &grp);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success: io_unit_size is capped to max_io_size */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	MOCK_CLEAR_P(spdk_sock_group_create);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
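
/* Verify that a freshly created TCP transport can be torn down cleanly. */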
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
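
/*
 * Verify C2H DATA PDU construction: datao/datal, plen, the data iovecs
 * and the LAST_PDU/SUCCESS flags must reflect the request, and SUCCESS
 * must be suppressed when cdw0 is non-zero or c2h_success is disabled.
 */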
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	/* A non-zero cdw0 must suppress the SUCCESS flag */
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	/* With c2h_success disabled, SUCCESS must never be set */
	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
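
/*
 * Verify that an H2C DATA PDU header is matched to its outstanding
 * request (via cccid and ttag) and that the PDU data iovecs are filled
 * in from the request's buffers.
 */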
#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_transport_ops ops = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
	ttransport.transport.ops = &ops;
	ops.req_get_buffers_done = nvmf_tcp_req_get_buffers_done;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req2, the idle request sitting on the TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;
	tcp_req1.req.data_from_pool = false;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* pretend that tcp_req1 is waiting in the iobuf waiting queue */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(tcp_req1.req.data_from_pool == false);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* process tqpair capsule req. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);

	/* pretend that buffer for tcp_req1 becomes available */
	spdk_nvmf_request_get_buffers(&tcp_req1.req, group, &ttransport.transport, UT_IO_UNIT_SIZE - 1);
	/* trigger callback as nvmf_request_iobuf_get_cb would */
	ttransport.transport.ops->req_get_buffers_done(&tcp_req1.req);
	CU_ASSERT(tcp_req1.state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
}
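
/*
 * Verify per-qpair resource setup: the default transport options, the
 * digest defaults negotiated by nvmf_tcp_qpair_init(), and the sizing
 * and wiring of the reqs, bufs, pdus, mgmt_pdu and recv buffer.
 */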
static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of tqpair resource */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
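
/*
 * Verify C2H TERM REQ construction: the amount of offending header data
 * copied into the PDU is capped at SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE
 * and the qpair moves to the QUIESCING recv state.
 */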
static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE. Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
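
/*
 * Verify CAPSULE RESP PDU construction with and without header digest:
 * plen includes the digest length only when hdgst is enabled, and the
 * completion queue entry is copied into the PDU verbatim.
 */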
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE.
	 * Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;
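
/*
 * Verify the transfer-type check: a command whose opcode encodes a
 * bidirectional data transfer should complete with
 * SPDK_NVME_SC_INVALID_OPCODE, carrying the original CID.
 */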
	/* case 1: ICReq PFV differs from the expected value (0). Expect: fail */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: ICReq HPDA is outside the valid range 0-31. Expect: fail */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}
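
/*
 * Verify SGL validation: a transport data block longer than max_io_size
 * must trigger a C2H TERM REQ and move the qpair to QUIESCING.
 */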
static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Set the CID to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up a valid transport data block SGL; the failure should come from the
	 * transfer-type check, not from SGL parsing */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}
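
/*
 * Walk nvmf_tcp_pdu_ch_handle() through its common-header error paths
 * (duplicate ICReq, bad hlen/plen/pdo, unexpected PDU types) and one
 * valid ICReq; every error case must produce a C2H TERM REQ.
 */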
static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Set the CID to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up the SGL to ensure that nvmf_tcp_req_parse_sgl returns an error:
	 * the length exceeds max_io_size */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
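
	/*
	 * In the "invalid header field" cases below, fei[0] appears to hold
	 * the byte offset of the offending field within the common PDU
	 * header: 2 for hlen, 3 for pdo, 4 for plen.
	 */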

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP connection has not been negotiated yet. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to the expectations. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}
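
/*
 * Verify TLS PSK registration: adding a host with a PSK file in
 * interchange format creates a tcp_psk_entry for the subsystem/host
 * pair, and removing the host deletes that entry again.
 */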
static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	struct spdk_sock_group grp = {};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing a PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}
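
/*
 * Verify PSK identity generation against the reference string
 * "NVMe0R01 <hostnqn> <subnqn>", plus the too-small-buffer and
 * unknown-cipher-suite error paths.
 */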
static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate the expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit the PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with an unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}
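
/*
 * Verify retained-PSK derivation: different input PSKs or different
 * hash algorithms must yield different (and, for SHA-384, longer)
 * retained keys, while unknown hash values or undersized output buffers
 * must make the derivation fail.
 */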
static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs differ when the input PSKs differ and the hash is the same. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs differ when the hash differs and the input PSK is the same. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown value as the hash makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer that is too small makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}
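
/*
 * Verify TLS-PSK derivation from a retained PSK: different cipher
 * suites must yield different keys, while unknown cipher values or
 * undersized output buffers must make the derivation fail.
 */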
static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown value as the cipher suite makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer that is too small makes the function fail. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}