xref: /spdk/test/unit/lib/nvmf/fc.c/fc_ut.c (revision 12fbe739a31b09aff0d05f354d4f3bbef99afc55)
1  /*   SPDX-License-Identifier: BSD-3-Clause
2   *   Copyright (C) 2019 Intel Corporation.
3   *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
4   *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
5   */
6  
7  /* NVMF FC Transport Unit Test */
8  
9  #include "spdk/env.h"
10  #include "spdk_internal/cunit.h"
11  #include "spdk/nvmf.h"
12  #include "spdk/endian.h"
13  #include "spdk/trace.h"
14  #include "spdk/log.h"
15  
16  #include "ut_multithread.c"
17  
18  #include "transport.h"
19  #include "nvmf_internal.h"
20  
21  #include "nvmf_fc.h"
22  
23  #include "json/json_util.c"
24  #include "json/json_write.c"
25  #include "nvmf/nvmf.c"
26  #include "nvmf/transport.c"
27  #include "spdk/bdev_module.h"
28  #include "nvmf/subsystem.c"
29  #include "nvmf/fc.c"
30  #include "nvmf/fc_ls.c"
31  
32  /*
33   * SPDK Stuff
34   */
35  
/*
 * Stubbed dependencies: the FC transport code under test links against many
 * nvmf/bdev/rte_hash symbols that are irrelevant to these tests, so they are
 * replaced with no-op stubs returning fixed values.
 */
DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB_V(nvmf_qpair_abort_pending_zcopy_reqs, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(nvmf_update_discovery_log,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

/* rte_hash stubs: create returns a non-NULL dummy handle so callers proceed. */
DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

DEFINE_STUB_V(spdk_nvmf_request_zcopy_start, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_request_zcopy_end, (struct spdk_nvmf_request *req, bool commit));
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);
79  
80  const char *
81  spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
82  {
83  	switch (trtype) {
84  	case SPDK_NVME_TRANSPORT_PCIE:
85  		return "PCIe";
86  	case SPDK_NVME_TRANSPORT_RDMA:
87  		return "RDMA";
88  	case SPDK_NVME_TRANSPORT_FC:
89  		return "FC";
90  	default:
91  		return NULL;
92  	}
93  }
94  
95  const char *
96  spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
97  {
98  	switch (adrfam) {
99  	case SPDK_NVMF_ADRFAM_IPV4:
100  		return "IPv4";
101  	case SPDK_NVMF_ADRFAM_IPV6:
102  		return "IPv6";
103  	case SPDK_NVMF_ADRFAM_IB:
104  		return "IB";
105  	case SPDK_NVMF_ADRFAM_FC:
106  		return "FC";
107  	default:
108  		return NULL;
109  	}
110  }
111  
/* Stub: hand back the uuid embedded in the bdev structure itself. */
const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}
117  
/* Set by the stubbed LLD init hook so tests can verify it was invoked. */
static bool g_lld_init_called = false;

/* Low-level-driver init hook: record the call and report success. */
int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}
126  
/* Set by the stubbed LLD fini hook so tests can verify it was invoked. */
static bool g_lld_fini_called = false;

/*
 * Low-level-driver teardown hook: only records the call.
 * NOTE(review): cb_fn/ctx are ignored here - presumably the transport destroy
 * path exercised by destroy_transport_test does not require the callback to
 * fire; confirm against the FC transport's destroy flow.
 */
void
nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
{
	g_lld_fini_called = true;
}
134  
/*
 * FC low-level-driver hardware hooks: stubbed out so no real HW path is
 * touched; queue/exchange operations all report success.
 */
DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
172  
/*
 * Fake queue-processing poller: bump lcore_id so it doubles as a per-hwqp
 * poll counter that poll_group_poll_test can check.
 */
uint32_t
nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->lcore_id++;
	return 0; /* always return 0 or else it will poll forever */
}
179  
180  struct spdk_nvmf_fc_xchg *
181  nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
182  {
183  	static struct spdk_nvmf_fc_xchg xchg;
184  
185  	xchg.xchg_id = 1;
186  	return &xchg;
187  }
188  
#define MAX_FC_UT_POLL_THREADS 8
/* One nvmf poll group per simulated poll thread (filled by create_poll_groups_test). */
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
/* Set by port_init_cb; 0xff means "no FC port created yet". */
uint8_t g_fc_port_handle = 0xff;
/* Backing storage handed to the port-init event as its io queue array. */
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
196  
/* Completion for the first add_transport call: must succeed. */
static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

/* Completion for the duplicate add_transport call: must fail with -EEXIST. */
static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}
208  
209  static void
210  create_transport_test(void)
211  {
212  	const struct spdk_nvmf_transport_ops *ops = NULL;
213  	struct spdk_nvmf_transport_opts opts = { 0 };
214  	struct spdk_nvmf_target_opts tgt_opts = {
215  		.name = "nvmf_test_tgt",
216  		.max_subsystems = 0
217  	};
218  
219  	allocate_threads(8);
220  	set_thread(0);
221  
222  	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
223  	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
224  
225  	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
226  	SPDK_CU_ASSERT_FATAL(ops != NULL);
227  
228  	ops->opts_init(&opts);
229  
230  	g_lld_init_called = false;
231  	opts.opts_size = sizeof(opts);
232  	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
233  	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
234  
235  	CU_ASSERT(g_lld_init_called == true);
236  	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
237  	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
238  	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
239  	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
240  	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
241  	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);
242  
243  	set_thread(0);
244  
245  	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
246  				    _add_transport_done, 0);
247  	poll_thread(0);
248  
249  	/* Add transport again - should get error */
250  	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
251  				    _add_transport_done_dup_err, 0);
252  	poll_thread(0);
253  
254  	/* create transport with bad args/options */
255  	opts.max_io_size = 1024 ^ 3;
256  	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
257  	opts.max_io_size = 999;
258  	opts.io_unit_size = 1024;
259  	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
260  }
261  
/*
 * Completion callback shared by the port INIT/ONLINE event tests; records the
 * handle for later lookups. The tests only ever create port handle 2.
 */
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}
269  
270  static void
271  create_fc_port_test(void)
272  {
273  	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
274  	struct spdk_nvmf_fc_port *fc_port = NULL;
275  	int err;
276  
277  	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
278  
279  	init_args.port_handle = 2;
280  	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
281  	init_args.ls_queue_size = 100;
282  	init_args.io_queue_size = 100;
283  	init_args.io_queues = (void *)lld_q;
284  
285  	set_thread(0);
286  	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
287  	CU_ASSERT(err == 0);
288  	poll_thread(0);
289  
290  	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
291  	CU_ASSERT(fc_port != NULL);
292  }
293  
294  static void
295  online_fc_port_test(void)
296  {
297  	struct spdk_nvmf_fc_port *fc_port;
298  	struct spdk_nvmf_fc_hw_port_online_args args;
299  	int err;
300  
301  	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
302  
303  	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
304  	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
305  
306  	set_thread(0);
307  	args.port_handle = g_fc_port_handle;
308  	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
309  	CU_ASSERT(err == 0);
310  	poll_threads();
311  	set_thread(0);
312  	if (err == 0) {
313  		uint32_t i;
314  		for (i = 0; i < fc_port->num_io_queues; i++) {
315  			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
316  			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
317  			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
318  		}
319  	}
320  }
321  
322  static void
323  create_poll_groups_test(void)
324  {
325  	unsigned i;
326  
327  	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
328  
329  	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
330  		set_thread(i);
331  		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
332  		poll_thread(i);
333  		CU_ASSERT(g_poll_groups[i] != NULL);
334  	}
335  	set_thread(0);
336  }
337  
338  static void
339  poll_group_poll_test(void)
340  {
341  	unsigned i;
342  	unsigned poll_cnt =  10;
343  	struct spdk_nvmf_fc_port *fc_port = NULL;
344  
345  	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
346  
347  	set_thread(0);
348  	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
349  	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
350  
351  	for (i = 0; i < fc_port->num_io_queues; i++) {
352  		fc_port->io_queues[i].lcore_id = 0;
353  	}
354  
355  	for (i = 0; i < poll_cnt; i++) {
356  		/* this should cause spdk_nvmf_fc_poll_group_poll to be called() */
357  		poll_threads();
358  	}
359  
360  	/* check if hwqp's lcore_id has been updated */
361  	for (i = 0; i < fc_port->num_io_queues; i++) {
362  		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
363  	}
364  }
365  
366  static void
367  remove_hwqps_from_poll_groups_test(void)
368  {
369  	unsigned i;
370  	struct spdk_nvmf_fc_port *fc_port = NULL;
371  
372  	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
373  
374  	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
375  	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
376  
377  	for (i = 0; i < fc_port->num_io_queues; i++) {
378  		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
379  		poll_threads();
380  		CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
381  	}
382  }
383  
/*
 * Destroy all poll groups and then the target; tearing the target down must
 * run the LLD fini hook (nvmf_fc_lld_fini).
 */
static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		/* NOTE(review): polls thread 0 while group i lives on thread i;
		 * poll_thread(i) looks like the intent - confirm against the
		 * poll group destroy threading model before changing. */
		poll_thread(0);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}
404  
/* CUnit suite setup: nothing to do; each test builds its own state. */
static int
nvmf_fc_tests_init(void)
{
	return 0;
}

/* CUnit suite teardown: release the threads allocated in create_transport_test. */
static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}
417  
418  int
419  main(int argc, char **argv)
420  {
421  	unsigned int num_failures = 0;
422  	CU_pSuite suite = NULL;
423  
424  	CU_initialize_registry();
425  
426  	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
427  
428  	CU_ADD_TEST(suite, create_transport_test);
429  	CU_ADD_TEST(suite, create_poll_groups_test);
430  	CU_ADD_TEST(suite, create_fc_port_test);
431  	CU_ADD_TEST(suite, online_fc_port_test);
432  	CU_ADD_TEST(suite, poll_group_poll_test);
433  	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
434  	CU_ADD_TEST(suite, destroy_transport_test);
435  
436  	num_failures = spdk_ut_run_tests(argc, argv, NULL);
437  	CU_cleanup_registry();
438  
439  	return num_failures;
440  }
441