/*
 *   BSD LICENSE
 *
 *   Copyright (c) 2018-2019 Broadcom.  All Rights Reserved.
 *   The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* NVMF FC Transport Unit Test */

#include "spdk/env.h"
#include "spdk_cunit.h"
#include "spdk/nvmf.h"
#include "spdk_internal/event.h"
#include "spdk/endian.h"
#include "spdk/trace.h"
#include "spdk_internal/log.h"

#include "ut_multithread.c"

#include "transport.h"
#include "nvmf_internal.h"
#include "nvmf_fc.h"

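/*
 * The library sources below (and ut_multithread.c above) are compiled
 * directly into this test binary rather than linked, so the test can reach
 * internal symbols and supply its own versions of the FC low-level driver
 * (LLD) hooks defined later in this file.
 */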
#include "json/json_util.c"
#include "json/json_write.c"
#include "nvmf/nvmf.c"
#include "nvmf/transport.c"
#include "nvmf/subsystem.c"
#include "nvmf/fc.c"
#include "nvmf/fc_ls.c"

/*
 * Stub and helper implementations for SPDK dependencies
 */

#ifdef SPDK_CONFIG_RDMA
const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
	.type = SPDK_NVME_TRANSPORT_RDMA,
	.opts_init = NULL,
	.create = NULL,
	.destroy = NULL,

	.listen = NULL,
	.stop_listen = NULL,
	.accept = NULL,

	.listener_discover = NULL,

	.poll_group_create = NULL,
	.poll_group_destroy = NULL,
	.poll_group_add = NULL,
	.poll_group_poll = NULL,

	.req_free = NULL,
	.req_complete = NULL,

	.qpair_fini = NULL,
	.qpair_is_idle = NULL,
	.qpair_get_peer_trid = NULL,
	.qpair_get_local_trid = NULL,
	.qpair_get_listen_trid = NULL,
};
#endif

const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
	.type = SPDK_NVME_TRANSPORT_TCP,
};

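/*
 * The DEFINE_STUB/DEFINE_STUB_V macros below come from SPDK's unit-test mock
 * framework: they define the named function with the given signature so it
 * simply returns the supplied value (or returns void).  Roughly, for example,
 * DEFINE_STUB(spdk_env_get_core_count, uint32_t, (void), 4) behaves like
 *
 *	uint32_t spdk_env_get_core_count(void) { return 4; }
 *
 * (simplified sketch; the real macros also allow the return value to be
 * overridden per test).
 */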
struct spdk_trace_histories *g_trace_histories;

DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
				   uint32_t size, uint64_t object_id, uint64_t arg1));
DEFINE_STUB(spdk_env_get_core_count, uint32_t, (void), 4);
DEFINE_STUB(spdk_nvme_transport_id_compare, int,
	    (const struct spdk_nvme_transport_id *trid1,
	     const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
DEFINE_STUB_V(spdk_trace_register_description,
	      (const char *name, uint16_t tpoint_id, uint8_t owner_type,
	       uint8_t object_type, uint8_t new_object, uint8_t arg1_type,
	       const char *arg1_name));
DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
DEFINE_STUB_V(spdk_nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
DEFINE_STUB_V(spdk_nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
	    NULL);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
				  spdk_bdev_remove_cb_t remove_cb,
				  void *remove_ctx, struct spdk_bdev_desc **desc), 0);
DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_bdev_module *module), 0);
DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
DEFINE_STUB(spdk_nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

const char *
spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
{
	switch (adrfam) {
	case SPDK_NVMF_ADRFAM_IPV4:
		return "IPv4";
	case SPDK_NVMF_ADRFAM_IPV6:
		return "IPv6";
	case SPDK_NVMF_ADRFAM_IB:
		return "IB";
	case SPDK_NVMF_ADRFAM_FC:
		return "FC";
	default:
		return NULL;
	}
}

const struct spdk_uuid *
spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
{
	return &bdev->uuid;
}

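/*
 * The FC low-level driver (LLD) init/fini hooks are given real (minimal)
 * implementations rather than stubs so the tests can verify that transport
 * create and destroy actually invoke them (checked via g_lld_init_called and
 * g_lld_fini_called).
 */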
static bool g_lld_init_called = false;

int
nvmf_fc_lld_init(void)
{
	g_lld_init_called = true;
	return 0;
}

static bool g_lld_fini_called = false;

void
nvmf_fc_lld_fini(void)
{
	g_lld_fini_called = true;
}

DEFINE_STUB_V(nvmf_fc_lld_start, (void));
DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
	    0);
DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
				   uint32_t ersp_len), 0);
DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
				      struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       struct spdk_nvmf_fc_xchg *xri,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
				       uint16_t ox_id, uint16_t rx_id,
				       uint16_t rpi, bool rjt, uint8_t rjt_exp,
				       spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
		size_t rsp_len), NULL);
DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
					struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
					spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
					uint16_t skip_rq), 0);
DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
		uint64_t *conn_id, uint32_t sq_size), true);
DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
	    (struct spdk_nvmf_fc_hwqp *queues,
	     uint32_t num_queues, uint64_t conn_id), NULL);
DEFINE_STUB_V(nvmf_fc_release_conn, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
				     uint32_t sq_size));
DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
					struct spdk_nvmf_fc_hwqp *io_queues,
					uint32_t num_io_queues,
					struct spdk_nvmf_fc_queue_dump_info *dump_info));
DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
				     struct spdk_nvmf_fc_xchg_info *info));
DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);

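/*
 * nvmf_fc_process_queue() is the LLD's per-hwqp poll hook.  Here it just
 * increments hwqp->lcore_id, which poll_group_poll_test repurposes as a
 * counter to prove that every hwqp gets polled on each pass.
 */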
uint32_t
nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
{
	hwqp->lcore_id++;
	return 0; /* always return 0 or else it will poll forever */
}

struct spdk_nvmf_fc_xchg *
nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
{
	static struct spdk_nvmf_fc_xchg xchg;

	xchg.xchg_id = 1;
	return &xchg;
}

#define MAX_FC_UT_POLL_THREADS 8
static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
uint8_t g_fc_port_handle = 0xff;
struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];

static void
_add_transport_done(void *arg, int status)
{
	CU_ASSERT(status == 0);
}

static void
_add_transport_done_dup_err(void *arg, int status)
{
	CU_ASSERT(status == -EEXIST);
}

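/*
 * Create the nvmf target and the FC transport, verify the LLD init hook ran
 * and that the transport picked up the requested opts, then exercise the
 * duplicate-add and bad-option error paths.
 */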
static void
create_transport_test(void)
{
	const struct spdk_nvmf_transport_ops *ops = NULL;
	struct spdk_nvmf_transport_opts opts = { 0 };
	struct spdk_nvmf_target_opts tgt_opts = {
		.name = "nvmf_test_tgt",
		.max_subsystems = 0
	};

	allocate_threads(8);
	set_thread(0);

	g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);

	ops = spdk_nvmf_get_transport_ops((enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC);
	SPDK_CU_ASSERT_FATAL(ops != NULL);

	ops->opts_init(&opts);

	g_lld_init_called = false;
	g_nvmf_tprt = spdk_nvmf_transport_create((enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
			&opts);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	CU_ASSERT(g_lld_init_called == true);
	CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
	CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
	CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
	CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
	CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
	CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);

	set_thread(0);

	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done, NULL);
	poll_thread(0);

	/* Add the same transport again - should get a -EEXIST error */
	spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
				    _add_transport_done_dup_err, NULL);
	poll_thread(0);

	/* Create transport with bad args/options */
#ifndef SPDK_CONFIG_RDMA
	CU_ASSERT(spdk_nvmf_transport_create(SPDK_NVMF_TRTYPE_RDMA, &opts) == NULL);
#endif
	CU_ASSERT(spdk_nvmf_transport_create(998, &opts) == NULL);
	/* Note: ^ is bitwise XOR in C, so this sets max_io_size to 1027,
	 * which transport create rejects.
	 */
	opts.max_io_size = 1024 ^ 3;
	CU_ASSERT(spdk_nvmf_transport_create((enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
					     &opts) == NULL);
	/* A small max_io_size combined with a larger io_unit_size should also be rejected */
	opts.max_io_size = 999;
	opts.io_unit_size = 1024;
	CU_ASSERT(spdk_nvmf_transport_create((enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
					     &opts) == NULL);
}

static void
port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
{
	CU_ASSERT(err == 0);
	CU_ASSERT(port_handle == 2);
	g_fc_port_handle = port_handle;
}

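/*
 * Enqueue an SPDK_FC_HW_PORT_INIT event for port handle 2 and verify that the
 * port can then be looked up; port_init_cb above records the handle used by
 * the rest of the tests.
 */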
static void
create_fc_port_test(void)
{
	struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
	struct spdk_nvmf_fc_port *fc_port = NULL;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	init_args.port_handle = 2;
	init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
	init_args.ls_queue_size = 100;
	init_args.io_queue_size = 100;
	init_args.io_queues = (void *)lld_q;

	set_thread(0);
	err = spdk_nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_thread(0);

	fc_port = spdk_nvmf_fc_port_lookup(g_fc_port_handle);
	CU_ASSERT(fc_port != NULL);
}

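/*
 * Bring the FC port online with an SPDK_FC_HW_PORT_ONLINE event and check
 * that every IO queue ends up attached to a poll group.
 */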
static void
online_fc_port_test(void)
{
	struct spdk_nvmf_fc_port *fc_port;
	struct spdk_nvmf_fc_hw_port_online_args args;
	int err;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = spdk_nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	set_thread(0);
	args.port_handle = g_fc_port_handle;
	err = spdk_nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
	CU_ASSERT(err == 0);
	poll_threads();
	set_thread(0);
	if (err == 0) {
		uint32_t i;
		for (i = 0; i < fc_port->num_io_queues; i++) {
			CU_ASSERT(fc_port->io_queues[i].fc_poll_group != 0);
			CU_ASSERT(fc_port->io_queues[i].fc_poll_group->hwqp_count != 0);
		}
	}
}

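/*
 * Create one nvmf poll group per test thread; the port's hwqps are expected
 * to be distributed across these groups when the port goes online.
 */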
static void
create_poll_groups_test(void)
{
	unsigned i;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
		poll_thread(i);
		CU_ASSERT(g_poll_groups[i] != NULL);
	}
	set_thread(0);
}

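/*
 * Poll all threads poll_cnt times and check that each hwqp's lcore_id
 * (incremented by the nvmf_fc_process_queue() hook above) advanced by exactly
 * poll_cnt, i.e. every hwqp was polled once per pass.
 */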
static void
poll_group_poll_test(void)
{
	unsigned i;
	unsigned poll_cnt = 10;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	set_thread(0);
	fc_port = spdk_nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		fc_port->io_queues[i].lcore_id = 0;
	}

	for (i = 0; i < poll_cnt; i++) {
		/* this should cause spdk_nvmf_fc_poll_group_poll to be called */
		poll_threads();
	}

	/* check if hwqp's lcore_id has been updated */
	for (i = 0; i < fc_port->num_io_queues; i++) {
		CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
	}
}

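/*
 * Detach each hwqp from its poll group and confirm the hwqp's poll group
 * back-pointer is cleared.
 */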
static void
remove_hwqps_from_poll_groups_test(void)
{
	unsigned i;
	struct spdk_nvmf_fc_port *fc_port = NULL;

	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	fc_port = spdk_nvmf_fc_port_lookup(g_fc_port_handle);
	SPDK_CU_ASSERT_FATAL(fc_port != NULL);

	for (i = 0; i < fc_port->num_io_queues; i++) {
		spdk_nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i]);
		poll_threads();
		CU_ASSERT(fc_port->io_queues[i].fc_poll_group == 0);
	}
}

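/*
 * Destroy the poll groups and the target (which tears down the FC transport)
 * and verify the LLD fini hook ran.
 */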
static void
destroy_transport_test(void)
{
	unsigned i;

	set_thread(0);
	SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);

	for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
		set_thread(i);
		spdk_nvmf_poll_group_destroy(g_poll_groups[i]);
		poll_thread(0);
	}

	SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
	g_lld_fini_called = false;
	spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
	poll_threads();
	CU_ASSERT(g_lld_fini_called == true);
}

static int
nvmf_fc_tests_init(void)
{
	return 0;
}

static int
nvmf_fc_tests_fini(void)
{
	free_threads();
	return 0;
}

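/*
 * The tests share g_nvmf_tgt, g_nvmf_tprt and g_fc_port_handle, so they must
 * run in the order they are registered below.
 */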
int main(int argc, char **argv)
{
	unsigned int num_failures = 0;
	CU_pSuite suite = NULL;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "Create Target & FC Transport",
			create_transport_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "Create Poll Groups",
			create_poll_groups_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "Create FC Port",
			create_fc_port_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "Online FC Port",
			online_fc_port_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "PG poll", poll_group_poll_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "Remove HWQP's from PG's",
			remove_hwqps_from_poll_groups_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (CU_add_test(suite, "Destroy Transport & Target",
			destroy_transport_test) == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();

	return num_failures;
}