xref: /spdk/test/unit/lib/nvmf/fc.c/fc_ut.c (revision 66289a6dbe28217365daa40fd92dcf327871c2e8)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
4  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
5  */
6 
7 /* NVMF FC Transport Unit Test */
8 
9 #include "spdk/env.h"
10 #include "spdk_internal/cunit.h"
11 #include "spdk/nvmf.h"
12 #include "spdk/endian.h"
13 #include "spdk/trace.h"
14 #include "spdk/log.h"
15 #include "spdk/util.h"
16 
17 #include "ut_multithread.c"
18 
19 #include "transport.h"
20 #include "nvmf_internal.h"
21 
22 #include "nvmf_fc.h"
23 
24 #include "json/json_util.c"
25 #include "json/json_write.c"
26 #include "nvmf/nvmf.c"
27 #include "nvmf/transport.c"
28 #include "spdk/bdev_module.h"
29 #include "nvmf/subsystem.c"
30 #include "nvmf/fc.c"
31 #include "nvmf/fc_ls.c"
32 
33 /*
34  * SPDK Stuff
35  */
36 
/* Generic SPDK / nvmf stubs: transport-id, bdev and controller hooks that the
 * compiled-in nvmf/fc code references but this UT never exercises for real.
 */
DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB_V(nvmf_qpair_abort_pending_zcopy_reqs, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

/* Async-event notifications: report success without queueing anything. */
DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
/* Request completion stub deliberately fails (-ENOSPC); no request in this UT
 * is expected to reach completion through this path. */
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(spdk_nvmf_send_discovery_log_notice,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

/* DPDK rte_hash stubs: rte_hash_create returns a non-NULL dummy handle
 * ((void *)1) so hash-table creation in fc.c appears to succeed; lookups
 * always miss (-ENOENT). */
DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

DEFINE_STUB_V(spdk_nvmf_request_zcopy_start, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_zcopy_end, (struct spdk_nvmf_request *req, bool commit));
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);

/* JSON / keyring / mDNS-PRR stubs referenced by the compiled-in nvmf core. */
DEFINE_STUB(spdk_json_parse, ssize_t, (void *json, size_t size, struct spdk_json_val *values,
				       size_t num_values, void **end, uint32_t flags), 0);
DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
DEFINE_STUB_V(nvmf_qpair_auth_destroy, (struct spdk_nvmf_qpair *q));
DEFINE_STUB_V(nvmf_tgt_stop_mdns_prr, (struct spdk_nvmf_tgt *tgt));
DEFINE_STUB(nvmf_tgt_update_mdns_prr, int, (struct spdk_nvmf_tgt *tgt), 0);
DEFINE_STUB(spdk_posix_file_load_from_name, void *, (const char *file_name, size_t *size), NULL);
89 const char *
90 spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
91 {
92 	switch (trtype) {
93 	case SPDK_NVME_TRANSPORT_PCIE:
94 		return "PCIe";
95 	case SPDK_NVME_TRANSPORT_RDMA:
96 		return "RDMA";
97 	case SPDK_NVME_TRANSPORT_FC:
98 		return "FC";
99 	default:
100 		return NULL;
101 	}
102 }
103 
104 const char *
105 spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
106 {
107 	switch (adrfam) {
108 	case SPDK_NVMF_ADRFAM_IPV4:
109 		return "IPv4";
110 	case SPDK_NVMF_ADRFAM_IPV6:
111 		return "IPv6";
112 	case SPDK_NVMF_ADRFAM_IB:
113 		return "IB";
114 	case SPDK_NVMF_ADRFAM_FC:
115 		return "FC";
116 	default:
117 		return NULL;
118 	}
119 }
120 
/* Hand-written stub: expose the bdev's embedded uuid field directly. */
const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}
126 
/* Set by the LLD init stub; checked by create_transport_test to verify the
 * transport create path invoked the low-level driver init hook. */
static bool g_lld_init_called = false;

/* LLD init hook stub: record the call and report success. */
int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}
135 
/* Set by the LLD fini stub; checked by destroy_transport_test to verify the
 * transport destroy path invoked the low-level driver fini hook. */
static bool g_lld_fini_called = false;

/* LLD fini hook stub: record the call.
 * NOTE(review): cb_fn is intentionally never invoked here — the destroy
 * sequence in this UT evidently completes without it, but confirm against
 * the transport destroy contract if tests start hanging on teardown. */
void
nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
{
	g_lld_fini_called = true;
}
143 
/* FC low-level driver (LLD) hook stubs: queue setup/teardown, data movement
 * and response transmission all succeed without doing any hardware work. */
DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
/* Send-request buffer allocation always fails (NULL) in this UT. */
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
/* Connection/hwqp assignment succeeds; conn-id to hwqp lookup always misses. */
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
181 
182 uint32_t
183 nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
184 {
185 	hwqp->lcore_id++;
186 	return 0; /* always return 0 or else it will poll forever */
187 }
188 
/* Exchange-allocation stub: hand out a single function-static exchange with a
 * fixed xchg_id.  Every caller shares the same object, which is fine for this
 * UT because exchanges are never held concurrently. */
struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}
197 
/* Shared UT state: one poll group per UT thread, one hwqp per poll group. */
#define MAX_FC_UT_POLL_THREADS 8
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
/* Set by port_init_cb when the HW-port-init event completes. */
uint8_t g_fc_port_handle = 0xff;
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
205 
/* Completion callback for the first spdk_nvmf_tgt_add_transport: must succeed. */
static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}
211 
/* Completion callback for the duplicate add_transport: must fail with -EEXIST. */
static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}
217 
218 static void
219 create_transport_test(void)
220 {
221 	const struct spdk_nvmf_transport_ops *ops = NULL;
222 	struct spdk_nvmf_transport_opts opts = { 0 };
223 	struct spdk_nvmf_target_opts tgt_opts = {
224 		.size = SPDK_SIZEOF(&tgt_opts, discovery_filter),
225 		.name = "nvmf_test_tgt",
226 		.max_subsystems = 0
227 	};
228 
229 	allocate_threads(8);
230 	set_thread(0);
231 
232 	spdk_iobuf_initialize();
233 
234 	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
235 	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
236 
237 	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
238 	SPDK_CU_ASSERT_FATAL(ops != NULL);
239 
240 	ops->opts_init(&opts);
241 
242 	g_lld_init_called = false;
243 	opts.opts_size = sizeof(opts);
244 	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
245 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
246 
247 	CU_ASSERT(g_lld_init_called == true);
248 	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
249 	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
250 	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
251 	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
252 	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
253 	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);
254 
255 	set_thread(0);
256 
257 	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
258 				    _add_transport_done, 0);
259 	poll_thread(0);
260 
261 	/* Add transport again - should get error */
262 	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
263 				    _add_transport_done_dup_err, 0);
264 	poll_thread(0);
265 
266 	/* create transport with bad args/options */
267 	opts.max_io_size = 1024 ^ 3;
268 	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
269 	opts.max_io_size = 999;
270 	opts.io_unit_size = 1024;
271 	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
272 }
273 
/* Completion callback for HW-port events: the port handle must be the one
 * create_fc_port_test enqueued (2); remember it for the later tests. */
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}
281 
282 static void
283 create_fc_port_test(void)
284 {
285 	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
286 	struct spdk_nvmf_fc_port *fc_port = NULL;
287 	int err;
288 
289 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
290 
291 	init_args.port_handle = 2;
292 	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
293 	init_args.ls_queue_size = 100;
294 	init_args.io_queue_size = 100;
295 	init_args.io_queues = (void *)lld_q;
296 
297 	set_thread(0);
298 	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
299 	CU_ASSERT(err == 0);
300 	poll_thread(0);
301 
302 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
303 	CU_ASSERT(fc_port != NULL);
304 }
305 
306 static void
307 online_fc_port_test(void)
308 {
309 	struct spdk_nvmf_fc_port *fc_port;
310 	struct spdk_nvmf_fc_hw_port_online_args args;
311 	int err;
312 
313 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
314 
315 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
316 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
317 
318 	set_thread(0);
319 	args.port_handle = g_fc_port_handle;
320 	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
321 	CU_ASSERT(err == 0);
322 	poll_threads();
323 	set_thread(0);
324 	if (err == 0) {
325 		uint32_t i;
326 		for (i = 0; i < fc_port->num_io_queues; i++) {
327 			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
328 			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
329 			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
330 		}
331 	}
332 }
333 
/* Create one nvmf poll group on each of the UT threads; online_fc_port_test
 * later distributes the port's hwqps across these groups. */
static void
create_poll_groups_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
		poll_thread(i);
		CU_ASSERT(g_poll_groups[i] != NULL);
	}
	set_thread(0);
}
349 
/* Poll all threads a fixed number of times and verify each hwqp was processed
 * exactly once per pass: the nvmf_fc_process_queue stub increments lcore_id
 * on every visit, so after poll_cnt passes each counter must equal poll_cnt. */
static void
poll_group_poll_test(void)
{
	unsigned i;
	unsigned poll_cnt =  10;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	set_thread(0);
	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	/* Reset the per-hwqp visit counters (lcore_id is repurposed as a
	 * counter by the nvmf_fc_process_queue stub above). */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		fc_port->io_queues[i].lcore_id = 0;
	}

	for (i = 0; i < poll_cnt; i++) {
		/* this should cause spdk_nvmf_fc_poll_group_poll to be called() */
		poll_threads();
	}

	/* check if hwqp's lcore_id has been updated */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
	}
}
377 
378 static void
379 remove_hwqps_from_poll_groups_test(void)
380 {
381 	unsigned i;
382 	struct spdk_nvmf_fc_port *fc_port = NULL;
383 
384 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
385 
386 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
387 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
388 
389 	for (i = 0; i < fc_port->num_io_queues; i++) {
390 		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
391 		poll_threads();
392 		CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
393 	}
394 }
395 
/* Destroy all poll groups and then the target; tearing down the target also
 * destroys the FC transport, which must invoke the LLD fini hook. */
static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		/* NOTE(review): polls thread 0 while thread i's group is being
		 * destroyed — looks like it should be poll_thread(i); confirm
		 * whether group destruction completes via thread 0 messages. */
		poll_thread(0);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}
416 
/* CUnit suite init: nothing to set up (threads are allocated by the first
 * test); always succeeds. */
static int
nvmf_fc_tests_init(void)
{
	return 0;
}
422 
/* CUnit suite teardown: release the UT threads allocated in
 * create_transport_test. */
static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}
429 
int
main(int argc, char **argv)
{
	unsigned int num_failures = 0;
	CU_pSuite suite = NULL;

	CU_initialize_registry();

	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);

	/* Registration order matters: each test builds on state left behind by
	 * the previous one (transport -> poll groups -> port -> online -> poll
	 * -> remove hwqps -> destroy). */
	CU_ADD_TEST(suite, create_transport_test);
	CU_ADD_TEST(suite, create_poll_groups_test);
	CU_ADD_TEST(suite, create_fc_port_test);
	CU_ADD_TEST(suite, online_fc_port_test);
	CU_ADD_TEST(suite, poll_group_poll_test);
	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
	CU_ADD_TEST(suite, destroy_transport_test);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();

	return num_failures;
}
453