xref: /spdk/test/unit/lib/nvmf/fc.c/fc_ut.c (revision 784b9d48746955f210926648a0131f84f58de76f)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
4  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
5  */
6 
7 /* NVMF FC Transport Unit Test */
8 
9 #include "spdk/env.h"
10 #include "spdk_cunit.h"
11 #include "spdk/nvmf.h"
12 #include "spdk/endian.h"
13 #include "spdk/trace.h"
14 #include "spdk/log.h"
15 
16 #include "ut_multithread.c"
17 
18 #include "transport.h"
19 #include "nvmf_internal.h"
20 
21 #include "nvmf_fc.h"
22 
23 #include "json/json_util.c"
24 #include "json/json_write.c"
25 #include "nvmf/nvmf.c"
26 #include "nvmf/transport.c"
27 #include "spdk/bdev_module.h"
28 #include "nvmf/subsystem.c"
29 #include "nvmf/fc.c"
30 #include "nvmf/fc_ls.c"
31 
32 /*
33  * SPDK Stuff
34  */
35 
#ifdef SPDK_CONFIG_RDMA
/*
 * Placeholder RDMA transport ops table.  nvmf/transport.c references this
 * symbol when RDMA support is configured, but no RDMA callback is ever
 * exercised by this suite, so every hook is left NULL.
 */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
	.type = SPDK_NVME_TRANSPORT_RDMA,
	.opts_init = NULL,
	.create = NULL,
	.destroy = NULL,

	.listen = NULL,
	.stop_listen = NULL,
	.accept = NULL,

	.listener_discover = NULL,

	.poll_group_create = NULL,
	.poll_group_destroy = NULL,
	.poll_group_add = NULL,
	.poll_group_poll = NULL,

	.req_free = NULL,
	.req_complete = NULL,

	.qpair_fini = NULL,
	.qpair_get_peer_trid = NULL,
	.qpair_get_local_trid = NULL,
	.qpair_get_listen_trid = NULL,
};
#endif
63 
/* Placeholder TCP transport ops table; only the type tag is populated. */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
	.type = SPDK_NVME_TRANSPORT_TCP,
};
67 
/*
 * Stubs for nvmf/bdev library symbols that the compiled-in sources
 * (json_*.c, nvmf.c, transport.c, subsystem.c, fc.c, fc_ls.c) reference but
 * this suite never exercises directly.  DEFINE_STUB returns the fixed value
 * given as its last argument; DEFINE_STUB_V is a void no-op.
 */
DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB_V(nvmf_qpair_abort_pending_zcopy_reqs, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(nvmf_update_discovery_log,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

/* DPDK rte_hash stubs: create hands back a non-NULL dummy handle, and
 * lookups report -ENOENT so fc.c treats its tables as empty. */
DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

DEFINE_STUB_V(spdk_nvmf_request_zcopy_start, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_zcopy_end, (struct spdk_nvmf_request *req, bool commit));
112 
113 const char *
114 spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
115 {
116 	switch (trtype) {
117 	case SPDK_NVME_TRANSPORT_PCIE:
118 		return "PCIe";
119 	case SPDK_NVME_TRANSPORT_RDMA:
120 		return "RDMA";
121 	case SPDK_NVME_TRANSPORT_FC:
122 		return "FC";
123 	default:
124 		return NULL;
125 	}
126 }
127 
128 const char *
129 spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
130 {
131 	switch (adrfam) {
132 	case SPDK_NVMF_ADRFAM_IPV4:
133 		return "IPv4";
134 	case SPDK_NVMF_ADRFAM_IPV6:
135 		return "IPv6";
136 	case SPDK_NVMF_ADRFAM_IB:
137 		return "IB";
138 	case SPDK_NVMF_ADRFAM_FC:
139 		return "FC";
140 	default:
141 		return NULL;
142 	}
143 }
144 
145 const struct spdk_uuid *
146 spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
147 {
148 	return &bdev->uuid;
149 }
150 
/* Set when the LLD init hook runs; checked by create_transport_test(). */
static bool g_lld_init_called = false;

/* FC low-level driver init stub: just record the call and report success. */
int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;

	return 0;
}
159 
/* Set when the LLD teardown hook runs; checked by destroy_transport_test(). */
static bool g_lld_fini_called = false;

/* FC low-level driver teardown stub: records the call.  NOTE(review): cb_fn
 * is never invoked here — destroy completion appears to be driven elsewhere;
 * confirm against fc.c before relying on it. */
void
nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
{
	g_lld_fini_called = true;
}
167 
/*
 * Stubs for the remaining FC low-level driver (LLD) hooks.  Every queue,
 * exchange, and transmit operation reports success so the transport state
 * machine can run end-to-end without real hardware.
 */
DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
				     struct spdk_nvmf_fc_xchg_info *info));
DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);
210 
211 uint32_t
212 nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
213 {
214 	hwqp->lcore_id++;
215 	return 0; /* always return 0 or else it will poll forever */
216 }
217 
218 struct spdk_nvmf_fc_xchg *
219 nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
220 {
221 	static struct spdk_nvmf_fc_xchg xchg;
222 
223 	xchg.xchg_id = 1;
224 	return &xchg;
225 }
226 
/* Number of UT reactor threads used by this suite. */
#define MAX_FC_UT_POLL_THREADS 8
/* One poll group per UT thread, created in create_poll_groups_test(). */
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
/* One hwqp per poll thread. */
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
/* Target and transport built in create_transport_test(). */
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
/* Port handle reported back by port_init_cb(); 0xff means "not assigned". */
uint8_t g_fc_port_handle = 0xff;
/* Backing storage handed to the FC port as its IO queue array. */
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
234 
/* Completion for the first spdk_nvmf_tgt_add_transport() call: must succeed. */
static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}
240 
/* Completion for the duplicate add-transport call: must fail with -EEXIST. */
static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}
246 
247 static void
248 create_transport_test(void)
249 {
250 	const struct spdk_nvmf_transport_ops *ops = NULL;
251 	struct spdk_nvmf_transport_opts opts = { 0 };
252 	struct spdk_nvmf_target_opts tgt_opts = {
253 		.name = "nvmf_test_tgt",
254 		.max_subsystems = 0
255 	};
256 
257 	allocate_threads(8);
258 	set_thread(0);
259 
260 	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
261 	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
262 
263 	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
264 	SPDK_CU_ASSERT_FATAL(ops != NULL);
265 
266 	ops->opts_init(&opts);
267 
268 	g_lld_init_called = false;
269 	opts.opts_size = sizeof(opts);
270 	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
271 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
272 
273 	CU_ASSERT(g_lld_init_called == true);
274 	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
275 	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
276 	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
277 	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
278 	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
279 	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);
280 
281 	set_thread(0);
282 
283 	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
284 				    _add_transport_done, 0);
285 	poll_thread(0);
286 
287 	/* Add transport again - should get error */
288 	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
289 				    _add_transport_done_dup_err, 0);
290 	poll_thread(0);
291 
292 	/* create transport with bad args/options */
293 #ifndef SPDK_CONFIG_RDMA
294 	CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
295 #endif
296 	CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
297 	opts.max_io_size = 1024 ^ 3;
298 	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
299 	opts.max_io_size = 999;
300 	opts.io_unit_size = 1024;
301 	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
302 }
303 
/* Event completion callback shared by the port INIT and ONLINE tests; the
 * suite always creates the port with handle 2, and the handle is saved for
 * later lookups. */
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}
311 
312 static void
313 create_fc_port_test(void)
314 {
315 	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
316 	struct spdk_nvmf_fc_port *fc_port = NULL;
317 	int err;
318 
319 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
320 
321 	init_args.port_handle = 2;
322 	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
323 	init_args.ls_queue_size = 100;
324 	init_args.io_queue_size = 100;
325 	init_args.io_queues = (void *)lld_q;
326 
327 	set_thread(0);
328 	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
329 	CU_ASSERT(err == 0);
330 	poll_thread(0);
331 
332 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
333 	CU_ASSERT(fc_port != NULL);
334 }
335 
336 static void
337 online_fc_port_test(void)
338 {
339 	struct spdk_nvmf_fc_port *fc_port;
340 	struct spdk_nvmf_fc_hw_port_online_args args;
341 	int err;
342 
343 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
344 
345 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
346 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
347 
348 	set_thread(0);
349 	args.port_handle = g_fc_port_handle;
350 	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
351 	CU_ASSERT(err == 0);
352 	poll_threads();
353 	set_thread(0);
354 	if (err == 0) {
355 		uint32_t i;
356 		for (i = 0; i < fc_port->num_io_queues; i++) {
357 			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
358 			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
359 			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
360 		}
361 	}
362 }
363 
364 static void
365 create_poll_groups_test(void)
366 {
367 	unsigned i;
368 
369 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
370 
371 	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
372 		set_thread(i);
373 		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
374 		poll_thread(i);
375 		CU_ASSERT(g_poll_groups[i] != NULL);
376 	}
377 	set_thread(0);
378 }
379 
380 static void
381 poll_group_poll_test(void)
382 {
383 	unsigned i;
384 	unsigned poll_cnt =  10;
385 	struct spdk_nvmf_fc_port *fc_port = NULL;
386 
387 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
388 
389 	set_thread(0);
390 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
391 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
392 
393 	for (i = 0; i < fc_port->num_io_queues; i++) {
394 		fc_port->io_queues[i].lcore_id = 0;
395 	}
396 
397 	for (i = 0; i < poll_cnt; i++) {
398 		/* this should cause spdk_nvmf_fc_poll_group_poll to be called() */
399 		poll_threads();
400 	}
401 
402 	/* check if hwqp's lcore_id has been updated */
403 	for (i = 0; i < fc_port->num_io_queues; i++) {
404 		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
405 	}
406 }
407 
408 static void
409 remove_hwqps_from_poll_groups_test(void)
410 {
411 	unsigned i;
412 	struct spdk_nvmf_fc_port *fc_port = NULL;
413 
414 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
415 
416 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
417 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
418 
419 	for (i = 0; i < fc_port->num_io_queues; i++) {
420 		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
421 		poll_threads();
422 		CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
423 	}
424 }
425 
426 static void
427 destroy_transport_test(void)
428 {
429 	unsigned i;
430 
431 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
432 
433 	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
434 		set_thread(i);
435 		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
436 		poll_thread(0);
437 	}
438 
439 	set_thread(0);
440 	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
441 	g_lld_fini_called = false;
442 	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
443 	poll_threads();
444 	CU_ASSERT(g_lld_fini_called == true);
445 }
446 
/* Suite setup: nothing to do; threads are allocated inside the first test. */
static int
nvmf_fc_tests_init(void)
{
	return 0;
}
452 
/* Suite teardown: release the UT threads created by create_transport_test(). */
static int
nvmf_fc_tests_fini(void)
{
	free_threads();

	return 0;
}
459 
460 int
461 main(int argc, char **argv)
462 {
463 	unsigned int num_failures = 0;
464 	CU_pSuite suite = NULL;
465 
466 	CU_set_error_action(CUEA_ABORT);
467 	CU_initialize_registry();
468 
469 	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
470 
471 	CU_ADD_TEST(suite, create_transport_test);
472 	CU_ADD_TEST(suite, create_poll_groups_test);
473 	CU_ADD_TEST(suite, create_fc_port_test);
474 	CU_ADD_TEST(suite, online_fc_port_test);
475 	CU_ADD_TEST(suite, poll_group_poll_test);
476 	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
477 	CU_ADD_TEST(suite, destroy_transport_test);
478 
479 	CU_basic_set_mode(CU_BRM_VERBOSE);
480 	CU_basic_run_tests();
481 	num_failures = CU_get_number_of_failures();
482 	CU_cleanup_registry();
483 
484 	return num_failures;
485 }
486