/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2019 Intel Corporation.
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/* NVMF FC Transport Unit Test */

#include "spdk/env.h"
#include "spdk_internal/cunit.h"
#include "spdk/nvmf.h"
#include "spdk/endian.h"
#include "spdk/trace.h"
#include "spdk/log.h"

#include "ut_multithread.c"

#include "transport.h"
#include "nvmf_internal.h"

#include "nvmf_fc.h"

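/*
 * The sources under test are compiled directly into this unit test binary
 * so that their external dependencies can be replaced by the stubs below.
 */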
#include "json/json_util.c"
#include "json/json_write.c"
#include "nvmf/nvmf.c"
#include "nvmf/transport.c"
#include "spdk/bdev_module.h"
#include "nvmf/subsystem.c"
#include "nvmf/fc.c"
#include "nvmf/fc_ls.c"

/*
 * Stubs and minimal helpers for the dependencies of the sources compiled in above.
 */

DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB_V(nvmf_qpair_abort_pending_zcopy_reqs, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(nvmf_update_discovery_log,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

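/* DPDK rte_hash stubs for the hash tables used by the FC code under test. */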
DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

DEFINE_STUB_V(spdk_nvmf_request_zcopy_start, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_zcopy_end, (struct spdk_nvmf_request *req, bool commit));
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
	switch (adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		return "IPv4";
	case SPDK_NVMF_ADRFAM_IPV6:
		return "IPv6";
	case SPDK_NVMF_ADRFAM_IB:
		return "IB";
	case SPDK_NVMF_ADRFAM_FC:
		return "FC";
	default:
		return NULL;
	}
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

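/*
 * Low-level driver (LLD) hooks.  init/fini are implemented here so the tests
 * can verify that transport create/destroy invokes them; the remaining LLD
 * entry points are stubbed or minimally implemented below.
 */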
static bool g_lld_init_called = false;

int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}

static bool g_lld_fini_called = false;

void
nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
{
	g_lld_fini_called = true;
}

DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
				     struct spdk_nvmf_fc_xchg_info *info));
DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);

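/*
 * Minimal queue-processing implementation: bump lcore_id as a per-hwqp poll
 * counter (checked by poll_group_poll_test) and report no outstanding work.
 */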
uint32_t
nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->lcore_id++;
	return 0; /* always return 0 or else it will poll forever */
}

struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}

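/* Global state shared by the test cases below, which run in suite order. */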
#define MAX_FC_UT_POLL_THREADS 8
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
uint8_t g_fc_port_handle = 0xff;
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];

static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}

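/*
 * Create the NVMe-oF target and the FC transport, verify that the LLD init
 * hook ran and that the requested opts were applied, add the transport to the
 * target (a second add must fail with -EEXIST), and confirm that invalid opts
 * are rejected.
 */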
static void
create_transport_test(void)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport_opts opts = { 0 };
	struct spdk_nvmf_target_opts tgt_opts = {
		.name = "nvmf_test_tgt",
		.max_subsystems = 0
	};

	allocate_threads(8);
	set_thread(0);

	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);

	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
	SPDK_CU_ASSERT_FATAL(ops != NULL);

	ops->opts_init(&opts);

	g_lld_init_called = false;
	opts.opts_size = sizeof(opts);
	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	CU_ASSERT(g_lld_init_called == true);
	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);

	set_thread(0);

	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done, NULL);
	poll_thread(0);

	/* Add transport again - should get error */
	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done_dup_err, NULL);
	poll_thread(0);

	/* Create transport with bad args/options */
	opts.max_io_size = 1024 ^ 3;	/* bitwise XOR (= 1027), an intentionally invalid I/O size */
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
	opts.max_io_size = 999;
	opts.io_unit_size = 1024;	/* io_unit_size exceeding max_io_size is also invalid */
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
}

/* Completion callback for the HW port init/online events; records the port handle. */
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}

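/*
 * Enqueue an SPDK_FC_HW_PORT_INIT event for port handle 2 and verify that the
 * port can then be found via nvmf_fc_port_lookup().
 */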
static void
create_fc_port_test(void)
{
	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	init_args.port_handle = 2;
	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
	init_args.ls_queue_size = 100;
	init_args.io_queue_size = 100;
	init_args.io_queues = (void *)lld_q;

	set_thread(0);
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_thread(0);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	CU_ASSERT(fc_port != NULL);
}

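/*
 * Bring the port online via SPDK_FC_HW_PORT_ONLINE and verify that every IO
 * queue was assigned to a poll group.
 */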
static void
online_fc_port_test(void)
{
	struct spdk_nvmf_fc_port *fc_port;
	struct spdk_nvmf_fc_hw_port_online_args args;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	set_thread(0);
	args.port_handle = g_fc_port_handle;
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_threads();
	set_thread(0);
	if (err == 0) {
		uint32_t i;
		for (i = 0; i < fc_port->num_io_queues; i++) {
			CU_ASSERT(fc_port->io_queues[i].fgroup != NULL);
			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
		}
	}
}

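/* Create one nvmf poll group on each of the test threads. */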
static void
create_poll_groups_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
		poll_thread(i);
		CU_ASSERT(g_poll_groups[i] != NULL);
	}
	set_thread(0);
}

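/*
 * Poll all threads a fixed number of times and verify that each hwqp was
 * polled that many times (nvmf_fc_process_queue() above counts polls in
 * lcore_id).
 */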
static void
poll_group_poll_test(void)
{
	unsigned i;
	unsigned poll_cnt = 10;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	set_thread(0);
	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		fc_port->io_queues[i].lcore_id = 0;
	}

	for (i = 0; i < poll_cnt; i++) {
		/* this should cause the FC transport's poll_group_poll callback to be called */
		poll_threads();
	}

	/* check that each hwqp's lcore_id was updated */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
	}
}

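/* Detach every hwqp from its poll group and verify the association is cleared. */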
static void
remove_hwqps_from_poll_groups_test(void)
{
	unsigned i;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
		poll_threads();
		CU_ASSERT(fc_port->io_queues[i].fgroup == NULL);
	}
}

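/*
 * Destroy the poll groups and the target, and verify that the LLD fini hook
 * ran as part of transport teardown.
 */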
static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		poll_thread(i);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}

static int
nvmf_fc_tests_init(void)
{
	return 0;
}

static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}

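/* The tests share global state and must run in the order they are added below. */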
int
main(int argc, char **argv)
{
	unsigned int num_failures = 0;
	CU_pSuite suite = NULL;

	CU_initialize_registry();

	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);

	CU_ADD_TEST(suite, create_transport_test);
	CU_ADD_TEST(suite, create_poll_groups_test);
	CU_ADD_TEST(suite, create_fc_port_test);
	CU_ADD_TEST(suite, online_fc_port_test);
	CU_ADD_TEST(suite, poll_group_poll_test);
	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
	CU_ADD_TEST(suite, destroy_transport_test);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	return num_failures;
}