/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/* NVMF FC Transport Unit Test */

#include "spdk/env.h"
#include "spdk_internal/cunit.h"
#include "spdk/nvmf.h"
#include "spdk/endian.h"
#include "spdk/trace.h"
#include "spdk/log.h"
#include "spdk/util.h"

#include "ut_multithread.c"

#include "transport.h"
#include "nvmf_internal.h"

#include "nvmf_fc.h"

#include "json/json_util.c"
#include "json/json_write.c"
#include "nvmf/nvmf.c"
#include "nvmf/transport.c"
#include "spdk/bdev_module.h"
#include "nvmf/subsystem.c"
#include "nvmf/fc.c"
#include "nvmf/fc_ls.c"

/*
 * SPDK Stuff
 */

DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB_V(nvmf_qpair_abort_pending_zcopy_reqs, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(spdk_nvmf_send_discovery_log_notice,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

DEFINE_STUB_V(spdk_nvmf_request_zcopy_start, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_zcopy_end, (struct spdk_nvmf_request *req, bool commit));
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);
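
/*
 * The helpers below are implemented by hand rather than via DEFINE_STUB
 * because the test (and the code under test) relies on their actual behavior,
 * e.g. nvmf_fc_lld_init()/nvmf_fc_lld_fini() record that they were called.
 */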
const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
	switch (adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		return "IPv4";
	case SPDK_NVMF_ADRFAM_IPV6:
		return "IPv6";
	case SPDK_NVMF_ADRFAM_IB:
		return "IB";
	case SPDK_NVMF_ADRFAM_FC:
		return "FC";
	default:
		return NULL;
	}
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

static bool g_lld_init_called = false;

int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}

static bool g_lld_fini_called = false;

void
nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
{
	g_lld_fini_called = true;
}

DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));

uint32_t
nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->lcore_id++;
	return 0; /* always return 0 or else it will poll forever */
}
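
/* Hand out a single static exchange context in place of a real XRI pool. */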
struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}

#define MAX_FC_UT_POLL_THREADS 8
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
uint8_t g_fc_port_handle = 0xff;
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];

static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}

static void
create_transport_test(void)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport_opts opts = { 0 };
	struct spdk_nvmf_target_opts tgt_opts = {
		.size = SPDK_SIZEOF(&tgt_opts, discovery_filter),
		.name = "nvmf_test_tgt",
		.max_subsystems = 0
	};

	allocate_threads(8);
	set_thread(0);

	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);

	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
	SPDK_CU_ASSERT_FATAL(ops != NULL);

	ops->opts_init(&opts);

	g_lld_init_called = false;
	opts.opts_size = sizeof(opts);
	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	CU_ASSERT(g_lld_init_called == true);
	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);

	set_thread(0);

	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done, 0);
	poll_thread(0);

	/* Add the same transport again - should fail with -EEXIST */
	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done_dup_err, 0);
	poll_thread(0);

	/* create transport with bad args/options */
	opts.max_io_size = 1024 ^ 3; /* XOR, not exponent: yields 1027, an intentionally invalid max_io_size */
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
	opts.max_io_size = 999;
	opts.io_unit_size = 1024;
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
}

static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}

static void
create_fc_port_test(void)
{
	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	init_args.port_handle = 2;
	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
	init_args.ls_queue_size = 100;
	init_args.io_queue_size = 100;
	init_args.io_queues = (void *)lld_q;

	set_thread(0);
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_thread(0);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	CU_ASSERT(fc_port != NULL);
}
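
/*
 * Bring the port created in create_fc_port_test() online and verify that each
 * of its hardware queue pairs was bound to a poll group.
 */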
static void
online_fc_port_test(void)
{
	struct spdk_nvmf_fc_port *fc_port;
	struct spdk_nvmf_fc_hw_port_online_args args;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	set_thread(0);
	args.port_handle = g_fc_port_handle;
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_threads();
	set_thread(0);
	if (err == 0) {
		uint32_t i;
		for (i = 0; i < fc_port->num_io_queues; i++) {
			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
		}
	}
}

static void
create_poll_groups_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
		poll_thread(i);
		CU_ASSERT(g_poll_groups[i] != NULL);
	}
	set_thread(0);
}

static void
poll_group_poll_test(void)
{
	unsigned i;
	unsigned poll_cnt = 10;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	set_thread(0);
	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		fc_port->io_queues[i].lcore_id = 0;
	}

	for (i = 0; i < poll_cnt; i++) {
		/* this should cause spdk_nvmf_fc_poll_group_poll() to be called */
		poll_threads();
	}

	/* check that each hwqp's lcore_id has been updated by the poller */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
	}
}

static void
remove_hwqps_from_poll_groups_test(void)
{
	unsigned i;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
		poll_threads();
		CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
	}
}

static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		poll_thread(0);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}

static int
nvmf_fc_tests_init(void)
{
	return 0;
}

static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}
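
/*
 * The tests share global state (target, transport, FC port, poll groups), so
 * they must run in the order they are registered below.
 */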
int
main(int argc, char **argv)
{
	unsigned int num_failures = 0;
	CU_pSuite suite = NULL;

	CU_initialize_registry();

	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);

	CU_ADD_TEST(suite, create_transport_test);
	CU_ADD_TEST(suite, create_poll_groups_test);
	CU_ADD_TEST(suite, create_fc_port_test);
	CU_ADD_TEST(suite, online_fc_port_test);
	CU_ADD_TEST(suite, poll_group_poll_test);
	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
	CU_ADD_TEST(suite, destroy_transport_test);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	return num_failures;
}