/*
 *   BSD LICENSE
 *
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* NVMF FC Transport Unit Test */

#include "spdk/env.h"
#include "spdk_cunit.h"
#include "spdk/nvmf.h"
#include "spdk/endian.h"
#include "spdk/trace.h"
#include "spdk/log.h"

#include "ut_multithread.c"

#include "transport.h"
#include "nvmf_internal.h"

#include "nvmf_fc.h"

#include "json/json_util.c"
#include "json/json_write.c"
#include "nvmf/nvmf.c"
#include "nvmf/transport.c"
#include "nvmf/subsystem.c"
#include "nvmf/fc.c"
#include "nvmf/fc_ls.c"

59  * SPDK Stuff
60  */
61 
#ifdef SPDK_CONFIG_RDMA
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
	.type = SPDK_NVME_TRANSPORT_RDMA,
	.opts_init = NULL,
	.create = NULL,
	.destroy = NULL,

	.listen = NULL,
	.stop_listen = NULL,
	.accept = NULL,

	.listener_discover = NULL,

	.poll_group_create = NULL,
	.poll_group_destroy = NULL,
	.poll_group_add = NULL,
	.poll_group_poll = NULL,

	.req_free = NULL,
	.req_complete = NULL,

	.qpair_fini = NULL,
	.qpair_get_peer_trid = NULL,
	.qpair_get_local_trid = NULL,
	.qpair_get_listen_trid = NULL,
};
#endif

const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
	.type = SPDK_NVME_TRANSPORT_TCP,
};

struct spdk_trace_histories *g_trace_histories;

DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description,
	      (const char *name, uint16_t tpoint_id, uint8_t owner_type,
	       uint8_t object_type, uint8_t new_object, uint8_t arg1_type,
	       const char *arg1_name));
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
				  spdk_bdev_remove_cb_t remove_cb,
				  void *remove_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);

DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB(nvmf_ctrlr_async_event_ana_change_notice, int,
	    (struct spdk_nvmf_ctrlr *ctrlr), 0);
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
		struct spdk_nvmf_ctrlr_data *cdata));
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
	    -ENOSPC);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
	switch (adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		return "IPv4";
	case SPDK_NVMF_ADRFAM_IPV6:
		return "IPv6";
	case SPDK_NVMF_ADRFAM_IB:
		return "IB";
	case SPDK_NVMF_ADRFAM_FC:
		return "FC";
	default:
		return NULL;
	}
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

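/*
 * The FC low-level driver (LLD) hooks are replaced with flag-setting stubs so
 * the tests can verify that transport create/destroy actually invokes them.
 */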
static bool g_lld_init_called = false;

int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}

static bool g_lld_fini_called = false;

void
nvmf_fc_lld_fini(void)
{
	g_lld_fini_called = true;
}

DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_release_conn, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
				     uint32_t sq_size));
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
				     struct spdk_nvmf_fc_xchg_info *info));
DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);

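/*
 * Instead of draining real hardware queues, bump lcore_id on every call so
 * poll_group_poll_test() can count how many times each hwqp was polled.
 */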
uint32_t
nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->lcore_id++;
	return 0; /* always return 0 or else it will poll forever */
}

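/* Hand out a single static exchange context instead of allocating from a real XRI pool. */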
struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}

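/* Shared test state: one poll group per test thread and one LLD hwqp per poll group. */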
#define MAX_FC_UT_POLL_THREADS 8
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
uint8_t g_fc_port_handle = 0xff;
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];

static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}

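/*
 * Create the NVMe-oF target and the FC transport, verify that the LLD init
 * hook ran and that the requested options were applied, add the transport to
 * the target (adding it a second time must fail with -EEXIST), and finally
 * check that creation is rejected for bogus names and invalid option values.
 */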
static void
create_transport_test(void)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport_opts opts = { 0 };
	struct spdk_nvmf_target_opts tgt_opts = {
		.name = "nvmf_test_tgt",
		.max_subsystems = 0
	};

	allocate_threads(8);
	set_thread(0);

	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);

	ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
	SPDK_CU_ASSERT_FATAL(ops != NULL);

	ops->opts_init(&opts);

	g_lld_init_called = false;
	g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	CU_ASSERT(g_lld_init_called == true);
	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);

	set_thread(0);

	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done, 0);
	poll_thread(0);

	/* Adding the same transport again must fail */
	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done_dup_err, 0);
	poll_thread(0);

	/* Transport creation with bad names/options must fail */
#ifndef SPDK_CONFIG_RDMA
	CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
#endif
	CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
	opts.max_io_size = 1024 ^ 3; /* '^' is XOR, not exponentiation: 1027 is an invalid max_io_size */
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
	opts.max_io_size = 999;
	opts.io_unit_size = 1024;
	CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
}

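/*
 * Completion callback shared by the port init/online events below; it records
 * the handle of the test port so later tests can look the port up.
 */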
static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}

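/*
 * Enqueue an SPDK_FC_HW_PORT_INIT event for a port with MAX_FC_UT_HWQPS I/O
 * queues and verify that the port can be looked up once the event has been
 * processed.
 */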
static void
create_fc_port_test(void)
{
	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	init_args.port_handle = 2;
	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
	init_args.ls_queue_size = 100;
	init_args.io_queue_size = 100;
	init_args.io_queues = (void *)lld_q;

	set_thread(0);
	err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_thread(0);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	CU_ASSERT(fc_port != NULL);
}

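/*
 * Bring the port online via SPDK_FC_HW_PORT_ONLINE and verify that every I/O
 * queue has been attached to an FC poll group.
 */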
static void
online_fc_port_test(void)
{
	struct spdk_nvmf_fc_port *fc_port;
	struct spdk_nvmf_fc_hw_port_online_args args;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	set_thread(0);
	args.port_handle = g_fc_port_handle;
	err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_threads();
	set_thread(0);
	if (err == 0) {
		uint32_t i;
		for (i = 0; i < fc_port->num_io_queues; i++) {
			CU_ASSERT(fc_port->io_queues[i].fgroup != 0);
			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
		}
	}
}

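/*
 * Create one NVMe-oF poll group per test thread; the FC port's hwqps get
 * attached to these groups later, when the port is brought online.
 */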
static void
create_poll_groups_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
		poll_thread(i);
		CU_ASSERT(g_poll_groups[i] != NULL);
	}
	set_thread(0);
}

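/*
 * Reset each hwqp's poll counter (lcore_id), run all threads poll_cnt times,
 * and verify that the stubbed nvmf_fc_process_queue() ran exactly once per
 * hwqp on every pass.
 */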
static void
poll_group_poll_test(void)
{
	unsigned i;
	unsigned poll_cnt = 10;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	set_thread(0);
	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		fc_port->io_queues[i].lcore_id = 0;
	}

	for (i = 0; i < poll_cnt; i++) {
		/* this causes the FC poll group poller to run on every thread */
		poll_threads();
	}

	/* check that each hwqp's lcore_id was bumped once per poll by nvmf_fc_process_queue() */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
	}
}

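/* Detach every hwqp from its poll group and verify that the fgroup pointer is cleared. */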
static void
remove_hwqps_from_poll_groups_test(void)
{
	unsigned i;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i]);
		poll_threads();
		CU_ASSERT(fc_port->io_queues[i].fgroup == 0);
	}
}

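/*
 * Destroy the poll groups and the target (which tears down the FC transport)
 * and verify that the LLD fini hook ran.
 */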
static void
destroy_transport_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
		poll_thread(0);
	}

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}

static int
nvmf_fc_tests_init(void)
{
	return 0;
}

static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}

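/*
 * The tests share state (target, transport, FC port), so they must run in the
 * order they are registered below.
 */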
int main(int argc, char **argv)
{
	unsigned int num_failures = 0;
	CU_pSuite suite = NULL;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);

	CU_ADD_TEST(suite, create_transport_test);
	CU_ADD_TEST(suite, create_poll_groups_test);
	CU_ADD_TEST(suite, create_fc_port_test);
	CU_ADD_TEST(suite, online_fc_port_test);
	CU_ADD_TEST(suite, poll_group_poll_test);
	CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
	CU_ADD_TEST(suite, destroy_transport_test);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}