xref: /spdk/test/unit/lib/nvmf/fc.c/fc_ut.c (revision d919a197d60e407aa1137d7512f8b0af92f3d593)
1 /*
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
5  *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /* NVMF FC Transport Unit Test */
35 
36 #include "spdk/env.h"
37 #include "spdk_cunit.h"
38 #include "spdk/nvmf.h"
39 #include "spdk/endian.h"
40 #include "spdk/trace.h"
41 #include "spdk/log.h"
42 
43 #include "ut_multithread.c"
44 
45 #include "transport.h"
46 #include "nvmf_internal.h"
47 
48 #include "nvmf_fc.h"
49 
50 #include "json/json_util.c"
51 #include "json/json_write.c"
52 #include "nvmf/nvmf.c"
53 #include "nvmf/transport.c"
54 #include "spdk/bdev_module.h"
55 #include "nvmf/subsystem.c"
56 #include "nvmf/fc.c"
57 #include "nvmf/fc_ls.c"
58 
59 /*
60  * SPDK Stuff
61  */
62 
#ifdef SPDK_CONFIG_RDMA
/*
 * Dummy RDMA transport ops table. It only registers the RDMA transport type
 * so transport lookups resolve; every callback is NULL because none of them
 * is ever invoked by the FC unit tests. Present only in RDMA-enabled builds.
 */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
	.type = SPDK_NVME_TRANSPORT_RDMA,
	.opts_init = NULL,
	.create = NULL,
	.destroy = NULL,

	.listen = NULL,
	.stop_listen = NULL,
	.accept = NULL,

	.listener_discover = NULL,

	.poll_group_create = NULL,
	.poll_group_destroy = NULL,
	.poll_group_add = NULL,
	.poll_group_poll = NULL,

	.req_free = NULL,
	.req_complete = NULL,

	.qpair_fini = NULL,
	.qpair_get_peer_trid = NULL,
	.qpair_get_local_trid = NULL,
	.qpair_get_listen_trid = NULL,
};
#endif

/* Dummy TCP transport ops table: type only, no callbacks (never driven). */
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
	.type = SPDK_NVME_TRANSPORT_TCP,
};
94 
/*
 * Stubs for SPDK library symbols that the code under test links against but
 * whose behavior is irrelevant to these FC transport tests. Return values
 * are chosen as harmless defaults.
 */
DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
/* -ENOSPC: any request completion attempted by the tests is reported as a
 * failure so it cannot be silently mistaken for real progress. */
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(nvmf_update_discovery_log,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

/* rte_hash stubs: creation "succeeds" with a dummy handle, lookups miss. */
DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

DEFINE_STUB(spdk_nvmf_request_zcopy_end, int, (struct spdk_nvmf_request *req, bool commit), 0);
137 
138 const char *
139 spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
140 {
141 	switch (trtype) {
142 	case SPDK_NVME_TRANSPORT_PCIE:
143 		return "PCIe";
144 	case SPDK_NVME_TRANSPORT_RDMA:
145 		return "RDMA";
146 	case SPDK_NVME_TRANSPORT_FC:
147 		return "FC";
148 	default:
149 		return NULL;
150 	}
151 }
152 
153 const char *
154 spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
155 {
156 	switch (adrfam) {
157 	case SPDK_NVMF_ADRFAM_IPV4:
158 		return "IPv4";
159 	case SPDK_NVMF_ADRFAM_IPV6:
160 		return "IPv6";
161 	case SPDK_NVMF_ADRFAM_IB:
162 		return "IB";
163 	case SPDK_NVMF_ADRFAM_FC:
164 		return "FC";
165 	default:
166 		return NULL;
167 	}
168 }
169 
/* Stub accessor: return the bdev's embedded UUID field directly. */
const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}
175 
/* Set when the FC transport initializes the low-level driver (LLD). */
static bool g_lld_init_called = false;

/* Stub LLD init hook: record that it ran and report success. */
int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}
184 
185 static bool g_lld_fini_called = false;
186 
187 void
188 nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
189 {
190 	g_lld_fini_called = true;
191 }
192 
/*
 * Stubs for the FC low-level driver (LLD) entry points that fc.c calls.
 * All report success / benign defaults; none perform real queue or
 * exchange processing.
 */
DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
/* NULL: send/receive special-request buffer allocation "fails" harmlessly. */
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
				     struct spdk_nvmf_fc_xchg_info *info));
DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);
235 
236 uint32_t
237 nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
238 {
239 	hwqp->lcore_id++;
240 	return 0; /* always return 0 or else it will poll forever */
241 }
242 
/*
 * Stub exchange allocator: always hands out the same statically allocated
 * exchange with id 1. Sufficient because these tests never hold more than
 * one XRI at a time.
 */
struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}
251 
252 #define MAX_FC_UT_POLL_THREADS 8
253 static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
254 #define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
255 static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
256 static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
257 uint8_t g_fc_port_handle = 0xff;
258 struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
259 
/* Completion callback for spdk_nvmf_tgt_add_transport(): expects success. */
static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}
265 
/* Completion callback for the second add of the same transport: a duplicate
 * registration must be rejected with -EEXIST. */
static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}
271 
/*
 * Create the shared NVMf target and the FC transport, attach the transport
 * to the target (twice, to exercise the duplicate-add error path), then try
 * a few invalid transport creations. Must run first: later tests rely on
 * g_nvmf_tgt/g_nvmf_tprt and the threads allocated here.
 */
static void
create_transport_test(void)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport_opts opts = { 0 };
	struct spdk_nvmf_target_opts tgt_opts = {
		.name = "nvmf_test_tgt",
		.max_subsystems = 0
	};

	allocate_threads(8);
	set_thread(0);

	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);

	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
	SPDK_CU_ASSERT_FATAL(ops != NULL);

	/* Start from the FC transport's default options. */
	ops->opts_init(&opts);

	g_lld_init_called = false;
	opts.opts_size = sizeof(opts);
	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	/* Creating the transport must have run the LLD init hook and kept the
	 * requested option values intact. */
	CU_ASSERT(g_lld_init_called == true);
	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);

	set_thread(0);

	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done, 0);
	poll_thread(0);

	/* Add transport again - should get error */
	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done_dup_err, 0);
	poll_thread(0);

	/* create transport with bad args/options */
#ifndef SPDK_CONFIG_RDMA
	CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
#endif
	CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
	/* NOTE(review): '^' is XOR, so this is 1027 (not a power of 1024);
	 * presumably any invalid max_io_size suffices here — confirm whether
	 * 1024 * 1024 * 1024 was intended. */
	opts.max_io_size = 1024 ^ 3;
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
	/* io_unit_size larger than max_io_size must also be rejected. */
	opts.max_io_size = 999;
	opts.io_unit_size = 1024;
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
}
328 
/* Event completion callback shared by the port INIT/ONLINE tests: expects
 * success for handle 2 (the only handle these tests use) and records it in
 * g_fc_port_handle for the later lookup-based tests. */
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}
336 
/* Enqueue an SPDK_FC_HW_PORT_INIT event and verify the new FC port becomes
 * discoverable via nvmf_fc_port_lookup(). */
static void
create_fc_port_test(void)
{
	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	init_args.port_handle = 2;
	/* One IO queue per core, capped at the UT hwqp array size. */
	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
	init_args.ls_queue_size = 100;
	init_args.io_queue_size = 100;
	init_args.io_queues = (void *)lld_q;

	set_thread(0);
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_thread(0);

	/* port_init_cb stored the handle; the port must now be registered. */
	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	CU_ASSERT(fc_port != NULL);
}
360 
361 static void
362 online_fc_port_test(void)
363 {
364 	struct spdk_nvmf_fc_port *fc_port;
365 	struct spdk_nvmf_fc_hw_port_online_args args;
366 	int err;
367 
368 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
369 
370 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
371 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
372 
373 	set_thread(0);
374 	args.port_handle = g_fc_port_handle;
375 	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
376 	CU_ASSERT(err == 0);
377 	poll_threads();
378 	set_thread(0);
379 	if (err == 0) {
380 		uint32_t i;
381 		for (i = 0; i < fc_port->num_io_queues; i++) {
382 			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
383 			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
384 			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
385 		}
386 	}
387 }
388 
389 static void
390 create_poll_groups_test(void)
391 {
392 	unsigned i;
393 
394 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
395 
396 	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
397 		set_thread(i);
398 		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
399 		poll_thread(i);
400 		CU_ASSERT(g_poll_groups[i] != NULL);
401 	}
402 	set_thread(0);
403 }
404 
405 static void
406 poll_group_poll_test(void)
407 {
408 	unsigned i;
409 	unsigned poll_cnt =  10;
410 	struct spdk_nvmf_fc_port *fc_port = NULL;
411 
412 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
413 
414 	set_thread(0);
415 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
416 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
417 
418 	for (i = 0; i < fc_port->num_io_queues; i++) {
419 		fc_port->io_queues[i].lcore_id = 0;
420 	}
421 
422 	for (i = 0; i < poll_cnt; i++) {
423 		/* this should cause spdk_nvmf_fc_poll_group_poll to be called() */
424 		poll_threads();
425 	}
426 
427 	/* check if hwqp's lcore_id has been updated */
428 	for (i = 0; i < fc_port->num_io_queues; i++) {
429 		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
430 	}
431 }
432 
433 static void
434 remove_hwqps_from_poll_groups_test(void)
435 {
436 	unsigned i;
437 	struct spdk_nvmf_fc_port *fc_port = NULL;
438 
439 	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
440 
441 	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
442 	SPDK_CU_ASSERT_FATAL(fc_port != NULL);
443 
444 	for (i = 0; i < fc_port->num_io_queues; i++) {
445 		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
446 		poll_threads();
447 		CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
448 	}
449 }
450 
/* Destroy all poll groups and then the target; verify that tearing down the
 * target ran the LLD fini hook. Runs last in the suite. */
static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		/* NOTE(review): polls thread 0 while thread i is current;
		 * presumably poll_thread(i) was intended — confirm against
		 * ut_multithread.c semantics. */
		poll_thread(0);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}
471 
/* CUnit suite setup hook: nothing to do (threads are allocated lazily by
 * create_transport_test). */
static int
nvmf_fc_tests_init(void)
{
	return 0;
}
477 
/* CUnit suite teardown hook: release the UT threads allocated by
 * create_transport_test. */
static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}
484 
485 int main(int argc, char **argv)
486 {
487 	unsigned int num_failures = 0;
488 	CU_pSuite suite = NULL;
489 
490 	CU_set_error_action(CUEA_ABORT);
491 	CU_initialize_registry();
492 
493 	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
494 
495 	CU_ADD_TEST(suite, create_transport_test);
496 	CU_ADD_TEST(suite, create_poll_groups_test);
497 	CU_ADD_TEST(suite, create_fc_port_test);
498 	CU_ADD_TEST(suite, online_fc_port_test);
499 	CU_ADD_TEST(suite, poll_group_poll_test);
500 	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
501 	CU_ADD_TEST(suite, destroy_transport_test);
502 
503 	CU_basic_set_mode(CU_BRM_VERBOSE);
504 	CU_basic_run_tests();
505 	num_failures = CU_get_number_of_failures();
506 	CU_cleanup_registry();
507 
508 	return num_failures;
509 }
510