/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_internal/cunit.h"
#include "spdk/bdev_zone.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"
#include "spdk/sock.h"
#include "spdk/hexlify.h"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB(spdk_nvmf_ns_find_host,
	    struct spdk_nvmf_host *,
	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
	    NULL);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
	    (struct spdk_nvmf_subsystem *subsystem), false);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_copy_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB_V(nvmf_bdev_ctrlr_identify_iocs_nvm,
	      (struct spdk_nvmf_ns *ns, struct spdk_nvme_nvm_ns_data *nsdata_nvm));

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));

DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
	    (const struct spdk_bdev *bdev), 0);
DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);

DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
	    (const struct spdk_nvme_ns_data *nsdata), 0);

DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");

DEFINE_STUB(spdk_sock_group_register_interrupt, int, (struct spdk_sock_group *group,
		uint32_t events, spdk_interrupt_fn fn, void *arg, const char *name), 0);
DEFINE_STUB_V(spdk_sock_group_unregister_interrupt, (struct spdk_sock_group *group));

DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
DEFINE_STUB(spdk_keyring_get_key, struct spdk_key *, (const char *name), NULL);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *k), NULL);
DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *k, void *buf, int len), 1);

DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
	    false);
DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
DEFINE_STUB(nvmf_request_get_buffers_abort, bool, (struct spdk_nvmf_request *r), false);
DEFINE_STUB(spdk_bdev_io_type_supported, bool,
	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);

struct spdk_io_channel *
spdk_accel_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0)

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* cast official trstring to uppercase version of input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* lengths equal to or larger than one io unit size will fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->flbas.msb_format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	MOCK_SET(spdk_sock_group_create, &grp);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	MOCK_CLEAR_P(spdk_sock_group_create);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport, NULL);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	tqpair.resource_count = 1;
	tqpair.reqs = &tcp_req;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 1;

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 1;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
}

static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_transport_ops ops = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
	ttransport.transport.ops = &ops;
	ops.req_get_buffers_done = nvmf_tcp_req_get_buffers_done;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init a null tcp_req into tqpair TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* init tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;
	tcp_req1.req.data_from_pool = false;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* pretend that tcp_req1 is waiting in the iobuf waiting queue */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(tcp_req1.req.data_from_pool == false);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* process tqpair capsule req. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);

	/* pretend that buffer for tcp_req1 becomes available */
	spdk_nvmf_request_get_buffers(&tcp_req1.req, group, &ttransport.transport, UT_IO_UNIT_SIZE - 1);
	/* trigger callback as nvmf_request_iobuf_get_cb would */
	ttransport.transport.ops->req_get_buffers_done(&tcp_req1.req);
	CU_ASSERT(tcp_req1.state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
}

static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_thread *thread;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair = calloc(1, sizeof(*tqpair));
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip ==
		  SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just to check the first and last entry */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress ==
		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of tqpair resource */
	nvmf_tcp_qpair_destroy(tqpair);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
}

static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disable */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.tcp_pdu_working_count = 1;

	/* case 1: the received ICReq PFV differs from the expected value (0). */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 2: the received ICReq HPDA is outside the expected range 0-31. */
	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);

	/* case 3: Expect: PASS. */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Needs to be set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.pdu->qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.mgmt_pdu->qpair = &tqpair;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* init pdu, make pdu need sgl buff */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Needs to be set to a non-zero value to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process a command and ensure that it fails and the request is set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

static void
test_nvmf_tcp_pdu_ch_handle(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.cpda = 0;

	/* Test case: Already received ICreq PDU. Expect: fail */
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);

	/* Test case: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = 0;
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);

	/* Test case: Unexpected PDU type. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 64;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ, let plen error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
			  struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA, let pdo error. Expect: fail */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.cpda = 1;
	tqpair.pdu_in_progress->hdr.common.plen = 0;
	tqpair.pdu_in_progress->hdr.common.pdo = 63;
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);

	/* Test case: All parameters conform to what the function expects. Expect: PASS */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
	nvmf_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

static void
test_nvmf_tcp_tls_add_remove_credentials(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_nvmf_subsystem subsystem;
	struct tcp_psk_entry *entry;
	struct spdk_sock_group grp = {};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
	char *psk_file_path = "/tmp/psk.txt";
	bool found = false;
	FILE *psk_file = NULL;
	mode_t oldmask;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	MOCK_SET(spdk_sock_group_create, &grp);
	transport = nvmf_tcp_create(&opts);
	MOCK_CLEAR_P(spdk_sock_group_create);

	memset(&subsystem, 0, sizeof(subsystem));
	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);

	/* Create a text file containing PSK in interchange format. */
	oldmask = umask(S_IXUSR | S_IRWXG | S_IRWXO);
	psk_file = fopen(psk_file_path, "w");
	CU_ASSERT(psk_file != NULL);
	CU_ASSERT(fprintf(psk_file, "%s", psk) > 0);
	CU_ASSERT(fclose(psk_file) == 0);
	umask(oldmask);

	struct spdk_json_val psk_json[] = {
		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
		{"psk", 3, SPDK_JSON_VAL_NAME},
		{psk_file_path, strlen(psk_file_path), SPDK_JSON_VAL_STRING},
		{"", 0, SPDK_JSON_VAL_OBJECT_END},
	};

	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == true);
	found = false;

	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);

	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	TAILQ_FOREACH(entry, &ttransport->psks, link) {
		if ((strcmp(subnqn, entry->subnqn) == 0) &&
		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
			found = true;
		}
	}

	CU_ASSERT(found == false);

	CU_ASSERT(remove(psk_file_path) == 0);

	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

static void
test_nvmf_tcp_tls_generate_psk_id(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
	char too_small_psk_id[5] = {};

	/* Check if we can generate expected PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);

	/* Test with a buffer that is too small to fit PSK id. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);

	/* Test with unknown cipher suite. */
	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
			subnqn, UINT8_MAX) != 0);
}

static void
test_nvmf_tcp_tls_generate_retained_psk(void)
{
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference1[] = {"1234567890ABCDEF"};
	const char psk_reference2[] = {"FEDCBA0987654321"};
	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
	char *unhexlified1;
	char *unhexlified2;
	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_retained[5] = {};
	int psk_retained_len1, psk_retained_len2;
	int retained_size;

	unhexlified1 = spdk_unhexlify(psk_reference1);
	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
	unhexlified2 = spdk_unhexlify(psk_reference2);
	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);

	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
	free(unhexlified1);
	free(unhexlified2);

	/* Make sure that retained PSKs are different with different input PSKs and the same hash. */
	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
					       psk_retained2,
					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);

	/* Make sure that retained PSKs are different with different hash and the same input PSKs. */
	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(psk_retained_len1 > 0);
	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
	CU_ASSERT(psk_retained_len2 > 0);
	CU_ASSERT(psk_retained_len1 < psk_retained_len2);

	/* Make sure that passing an unknown value as the hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);

	/* Make sure that passing a buffer of insufficient size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
					       too_small_psk_retained, sizeof(too_small_psk_retained),
					       NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
}

static void
test_nvmf_tcp_tls_generate_tls_psk(void)
{
	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
	const char psk_reference[] = {"1234567890ABCDEF"};
	char *unhexlified;
	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
	uint8_t too_small_psk_tls[5] = {};
	int retained_size, tls_size;

	unhexlified = spdk_unhexlify(psk_reference);
	CU_ASSERT(unhexlified != NULL);

	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
	free(unhexlified);

	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
	CU_ASSERT(retained_size > 0);

	/* Make sure that different cipher suites produce different TLS PSKs. */
	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
	CU_ASSERT(tls_size > 0);
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);

	/* Make sure that passing an unknown value as the hash errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);

	/* Make sure that passing a buffer of insufficient size errors out the function. */
	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
					  too_small_psk_tls, sizeof(too_small_psk_tls),
					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}