/*
 *   BSD LICENSE
 *
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* NVMF FC Transport Unit Test */

#include "spdk/env.h"
#include "spdk_cunit.h"
#include "spdk/nvmf.h"
#include "spdk/endian.h"
#include "spdk/trace.h"
#include "spdk/log.h"

#include "ut_multithread.c"

#include "transport.h"
#include "nvmf_internal.h"

#include "nvmf_fc.h"

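/*
 * The implementation files under test are compiled into the test binary
 * directly so that their internal (static) symbols are reachable and the
 * remaining SPDK/DPDK dependencies can be satisfied by the stubs defined
 * below.
 */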
#include "json/json_util.c"
#include "json/json_write.c"
#include "nvmf/nvmf.c"
#include "nvmf/transport.c"
#include "spdk/bdev_module.h"
#include "nvmf/subsystem.c"
#include "nvmf/fc.c"
#include "nvmf/fc_ls.c"

/*
 * Stubs and overrides for SPDK, DPDK and low-level driver (LLD) symbols
 * referenced by the code under test.
 */

#ifdef SPDK_CONFIG_RDMA
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
	.type = SPDK_NVME_TRANSPORT_RDMA,
	.opts_init = NULL,
	.create = NULL,
	.destroy = NULL,

	.listen = NULL,
	.stop_listen = NULL,
	.accept = NULL,

	.listener_discover = NULL,

	.poll_group_create = NULL,
	.poll_group_destroy = NULL,
	.poll_group_add = NULL,
	.poll_group_poll = NULL,

	.req_free = NULL,
	.req_complete = NULL,

	.qpair_fini = NULL,
	.qpair_get_peer_trid = NULL,
	.qpair_get_local_trid = NULL,
	.qpair_get_listen_trid = NULL,
};
#endif

const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
	.type = SPDK_NVME_TRANSPORT_TCP,
};

DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

DEFINE_STUB_V(nvmf_update_discovery_log,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn));

DEFINE_STUB(rte_hash_create, struct rte_hash *, (const struct rte_hash_parameters *params),
	    (void *)1);
DEFINE_STUB(rte_hash_del_key, int32_t, (const struct rte_hash *h, const void *key), 0);
DEFINE_STUB(rte_hash_lookup_data, int, (const struct rte_hash *h, const void *key, void **data),
	    -ENOENT);
DEFINE_STUB(rte_hash_add_key_data, int, (const struct rte_hash *h, const void *key, void *data), 0);
DEFINE_STUB_V(rte_hash_free, (struct rte_hash *h));
DEFINE_STUB(nvmf_fc_lld_port_add, int, (struct spdk_nvmf_fc_port *fc_port), 0);
DEFINE_STUB(nvmf_fc_lld_port_remove, int, (struct spdk_nvmf_fc_port *fc_port), 0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
	switch (adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		return "IPv4";
	case SPDK_NVMF_ADRFAM_IPV6:
		return "IPv6";
	case SPDK_NVMF_ADRFAM_IB:
		return "IB";
	case SPDK_NVMF_ADRFAM_FC:
		return "FC";
	default:
		return NULL;
	}
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

static bool g_lld_init_called = false;

int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}

static bool g_lld_fini_called = false;

void
nvmf_fc_lld_fini(spdk_nvmf_transport_destroy_done_cb cb_fn, void *ctx)
{
	g_lld_fini_called = true;
}

DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
				     struct spdk_nvmf_fc_xchg_info *info));
DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);

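/*
 * Non-trivial LLD overrides: nvmf_fc_process_queue() bumps hwqp->lcore_id on
 * every call so that poll_group_poll_test() can count how often each hwqp was
 * polled, and nvmf_fc_get_xri() hands out a single static exchange.
 */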
uint32_t
nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->lcore_id++;
	return 0; /* always return 0 or else it will poll forever */
}

struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}

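/*
 * State shared across the test cases below; the cases depend on running in
 * the order they are registered in main().
 */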
#define MAX_FC_UT_POLL_THREADS 8
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
uint8_t g_fc_port_handle = 0xff;
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];

static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}

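/*
 * Creates the NVMe-oF target and the FC transport, verifies that transport
 * creation invoked nvmf_fc_lld_init() and honored the requested options,
 * checks that adding the same transport twice fails with -EEXIST, and that
 * creation is rejected for unknown transport names and invalid options.
 */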
static void
create_transport_test(void)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport_opts opts = { 0 };
	struct spdk_nvmf_target_opts tgt_opts = {
		.name = "nvmf_test_tgt",
		.max_subsystems = 0
	};

	allocate_threads(8);
	set_thread(0);

	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);

	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
	SPDK_CU_ASSERT_FATAL(ops != NULL);

	ops->opts_init(&opts);

	g_lld_init_called = false;
	opts.opts_size = sizeof(opts);
	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	CU_ASSERT(g_lld_init_called == true);
	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);

	set_thread(0);

	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done, NULL);
	poll_thread(0);

	/* Add transport again - should get error */
	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done_dup_err, NULL);
	poll_thread(0);

	/* Create transport with bad args/options */
#ifndef SPDK_CONFIG_RDMA
	CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
#endif
	CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
	opts.max_io_size = 1024 ^ 3;	/* ^ is XOR, so this is 1027 - an invalid max_io_size */
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
	opts.max_io_size = 999;
	opts.io_unit_size = 1024;	/* io_unit_size exceeds max_io_size */
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
}

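/*
 * Event callback shared by the port tests: records the port handle reported
 * by the FC main thread and asserts it matches the handle (2) requested in
 * create_fc_port_test().
 */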
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}

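/*
 * Enqueues an SPDK_FC_HW_PORT_INIT event with MAX_FC_UT_HWQPS (capped at the
 * core count) I/O queues and verifies the new port can be found via
 * nvmf_fc_port_lookup().
 */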
static void
create_fc_port_test(void)
{
	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	init_args.port_handle = 2;
	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
	init_args.ls_queue_size = 100;
	init_args.io_queue_size = 100;
	init_args.io_queues = (void *)lld_q;

	set_thread(0);
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_thread(0);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	CU_ASSERT(fc_port != NULL);
}

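/*
 * Enqueues an SPDK_FC_HW_PORT_ONLINE event and verifies that every I/O hwqp
 * of the port was assigned to a poll group (fgroup set, hwqp_count non-zero).
 */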
static void
online_fc_port_test(void)
{
	struct spdk_nvmf_fc_port *fc_port;
	struct spdk_nvmf_fc_hw_port_online_args args;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	set_thread(0);
	args.port_handle = g_fc_port_handle;
	err = nvmf_fc_main_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_threads();
	set_thread(0);
	if (err == 0) {
		uint32_t i;

		for (i = 0; i < fc_port->num_io_queues; i++) {
			CU_ASSERT(fc_port->io_queues[i].fgroup != NULL);
			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
		}
	}
}

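/*
 * Creates one poll group per test thread; the hwqps are distributed over
 * these groups when the port is brought online in online_fc_port_test().
 */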
static void
create_poll_groups_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
		poll_thread(i);
		CU_ASSERT(g_poll_groups[i] != NULL);
	}
	set_thread(0);
}

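/*
 * Polls all threads poll_cnt times; because the nvmf_fc_process_queue()
 * override increments lcore_id on every call, each hwqp's lcore_id should
 * equal poll_cnt afterwards, showing every hwqp was polled once per round.
 */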
static void
poll_group_poll_test(void)
{
	unsigned i;
	unsigned poll_cnt = 10;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	set_thread(0);
	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		fc_port->io_queues[i].lcore_id = 0;
	}

	for (i = 0; i < poll_cnt; i++) {
		/* this should cause the FC transport's poll_group_poll callback to be called */
		poll_threads();
	}

	/* check if each hwqp's lcore_id has been updated */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
	}
}

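/*
 * Removes every hwqp from its poll group and verifies the hwqp's fgroup
 * pointer is cleared once the removal completes.
 */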
static void
remove_hwqps_from_poll_groups_test(void)
{
	unsigned i;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i], NULL, NULL);
		poll_threads();
		CU_ASSERT(fc_port->io_queues[i].fgroup == NULL);
	}
}

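/*
 * Destroys the poll groups and then the target; destroying the target tears
 * down the FC transport, which must call nvmf_fc_lld_fini().
 */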
static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		poll_thread(0);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}

static int
nvmf_fc_tests_init(void)
{
	return 0;
}

static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}

int main(int argc, char **argv)
{
	unsigned int num_failures = 0;
	CU_pSuite suite = NULL;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);

	CU_ADD_TEST(suite, create_transport_test);
	CU_ADD_TEST(suite, create_poll_groups_test);
	CU_ADD_TEST(suite, create_fc_port_test);
	CU_ADD_TEST(suite, online_fc_port_test);
	CU_ADD_TEST(suite, poll_group_poll_test);
	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
	CU_ADD_TEST(suite, destroy_transport_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}