xref: /spdk/test/unit/lib/nvmf/transport.c/transport_ut.c (revision cc6920a4763d4b9a43aa40583c8397d8f14fa100)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include "spdk/stdinc.h"
34 #include "spdk_cunit.h"
35 #include "common/lib/test_env.c"
36 #include "nvmf/transport.c"
37 #include "nvmf/rdma.c"
38 #include "common/lib/test_rdma.c"
39 
SPDK_LOG_REGISTER_COMPONENT(nvmf)

/* Number of minimum-size IO buffer units that make up one max-size IO. */
#define RDMA_UT_UNITS_IN_MAX_IO 16
/* Per-poll-group buffer cache size requested by the poll-group tests. */
#define SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE 32

/*
 * Baseline transport options shared by the tests. Fields use the RDMA
 * transport defaults; max_io_size is sized as a whole number of
 * io_unit_size buffers so option validation passes.
 */
struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
	.opts_size = sizeof(g_rdma_ut_transport_opts)
};
55 
/* Stubs for SPDK nvmf-layer symbols referenced by transport.c / rdma.c. */
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
		nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
/* Mockable transport ops used by test_spdk_nvmf_transport_create(). */
DEFINE_STUB(ut_transport_create, struct spdk_nvmf_transport *,
	    (struct spdk_nvmf_transport_opts *opts), NULL);
DEFINE_STUB(ut_transport_destroy, int, (struct spdk_nvmf_transport *transport,
					spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg), 0);
/* Stubs for libibverbs / librdmacm symbols pulled in by rdma.c. */
DEFINE_STUB(ibv_get_device_name, const char *, (struct ibv_device *device), NULL);
DEFINE_STUB(ibv_query_qp, int, (struct ibv_qp *qp, struct ibv_qp_attr *attr,
				int attr_mask,
				struct ibv_qp_init_attr *init_attr), 0);
DEFINE_STUB(rdma_create_id, int, (struct rdma_event_channel *channel,
				  struct rdma_cm_id **id, void *context,
				  enum rdma_port_space ps), 0);
DEFINE_STUB(rdma_bind_addr, int, (struct rdma_cm_id *id, struct sockaddr *addr), 0);
DEFINE_STUB(rdma_listen, int, (struct rdma_cm_id *id, int backlog), 0);
DEFINE_STUB(rdma_destroy_id, int, (struct rdma_cm_id *id), 0);
DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(rdma_reject, int, (struct rdma_cm_id *id,
			       const void *private_data, uint8_t private_data_len), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
DEFINE_STUB_V(rdma_destroy_qp, (struct rdma_cm_id *id));
DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
DEFINE_STUB(ibv_dealloc_pd, int, (struct ibv_pd *pd), 0);
DEFINE_STUB(rdma_create_event_channel, struct rdma_event_channel *, (void), NULL);
DEFINE_STUB(rdma_get_devices, struct ibv_context **, (int *num_devices), NULL);
DEFINE_STUB(ibv_query_device, int, (struct ibv_context *context,
				    struct ibv_device_attr *device_attr), 0);
DEFINE_STUB(ibv_alloc_pd, struct ibv_pd *, (struct ibv_context *context), NULL);
DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
DEFINE_STUB(ibv_get_async_event, int, (struct ibv_context *context, struct ibv_async_event *event),
	    0);
DEFINE_STUB(ibv_event_type_str, const char *, (enum ibv_event_type event_type), NULL);
DEFINE_STUB_V(ibv_ack_async_event, (struct ibv_async_event *event));
DEFINE_STUB(rdma_get_cm_event, int, (struct rdma_event_channel *channel,
				     struct rdma_cm_event **event), 0);
DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe,
		void *cq_context,
		struct ibv_comp_channel *channel,
		int comp_vector), NULL);
DEFINE_STUB(ibv_wc_status_str, const char *, (enum ibv_wc_status status), NULL);
DEFINE_STUB(rdma_get_dst_port, __be16, (struct rdma_cm_id *id), 0);
DEFINE_STUB(rdma_get_src_port, __be16, (struct rdma_cm_id *id), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
		struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(ibv_reg_mr_iova2, struct ibv_mr *, (struct ibv_pd *pd, void *addr, size_t length,
		uint64_t iova, unsigned int access), NULL);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
115 
116 /* ibv_reg_mr can be a macro, need to undefine it */
117 #ifdef ibv_reg_mr
118 #undef ibv_reg_mr
119 #endif
120 
121 DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
122 struct ibv_mr *
123 ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
124 {
125 	HANDLE_RETURN_MOCK(ibv_reg_mr);
126 	if (length > 0) {
127 		return &g_rdma_mr;
128 	} else {
129 		return NULL;
130 	}
131 }
132 
/*
 * Verify spdk_nvmf_transport_create():
 *   1. fails when no transport ops with the requested name is registered,
 *   2. succeeds once "new_ops" is registered and the create mock returns
 *      a transport (opts/ops copied in, data buffer pool allocated),
 *   3. fails when the opts are invalid (max_io_size not a multiple of
 *      io_unit_size units).
 * Cleans up by unregistering the ops element it added.
 */
static void
test_spdk_nvmf_transport_create(void)
{
	int rc;
	struct spdk_nvmf_transport ut_transport = {};
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_transport_ops_list_element *ops_element;
	struct spdk_nvmf_transport_ops ops = {
		.name = "new_ops",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create = ut_transport_create,
		.destroy = ut_transport_destroy
	};

	/* No available ops element */
	transport = spdk_nvmf_transport_create("new_ops", &g_rdma_ut_transport_opts);
	CU_ASSERT(transport == NULL);

	/* Create transport successfully */
	MOCK_SET(ut_transport_create, &ut_transport);
	spdk_nvmf_transport_register(&ops);

	transport = spdk_nvmf_transport_create("new_ops", &g_rdma_ut_transport_opts);
	CU_ASSERT(transport == &ut_transport);
	CU_ASSERT(!memcmp(&transport->opts, &g_rdma_ut_transport_opts, sizeof(g_rdma_ut_transport_opts)));
	CU_ASSERT(!memcmp(transport->ops, &ops, sizeof(ops)));
	CU_ASSERT(transport->data_buf_pool != NULL);

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);

	/* transport_opts parameter invalid: max_io_size must be at least
	 * 8 KiB and a power-of-two multiple of io_unit_size. */
	g_rdma_ut_transport_opts.max_io_size = 4096;

	transport = spdk_nvmf_transport_create("new_ops", &g_rdma_ut_transport_opts);
	CU_ASSERT(transport == NULL);
	/* Restore the shared opts for later tests. */
	g_rdma_ut_transport_opts.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE *
						RDMA_UT_UNITS_IN_MAX_IO);

	/* Unregister "new_ops" (registration appends, so it is the tail). */
	ops_element = TAILQ_LAST(&g_spdk_nvmf_transport_ops, nvmf_transport_ops_list);
	TAILQ_REMOVE(&g_spdk_nvmf_transport_ops, ops_element, link);
	free(ops_element);
	MOCK_CLEAR(ut_transport_create);
}
177 
178 static struct spdk_nvmf_transport_poll_group *
179 ut_poll_group_create(struct spdk_nvmf_transport *transport)
180 {
181 	struct spdk_nvmf_transport_poll_group *group;
182 
183 	group = calloc(1, sizeof(*group));
184 	SPDK_CU_ASSERT_FATAL(group != NULL);
185 	return group;
186 }
187 
/* Matching poll_group_destroy ops callback: releases ut_poll_group_create's allocation. */
static void
ut_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	free(group);
}
193 
194 static void
195 test_nvmf_transport_poll_group_create(void)
196 {
197 	struct spdk_nvmf_transport_poll_group *poll_group = NULL;
198 	struct spdk_nvmf_transport transport = {};
199 	struct spdk_nvmf_transport_ops ops = {};
200 
201 	ops.poll_group_create = ut_poll_group_create;
202 	ops.poll_group_destroy = ut_poll_group_destroy;
203 	transport.ops = &ops;
204 	transport.opts.buf_cache_size = SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE;
205 	transport.data_buf_pool = spdk_mempool_create("buf_pool", 32, 4096, 0, 0);
206 
207 	poll_group = nvmf_transport_poll_group_create(&transport);
208 	SPDK_CU_ASSERT_FATAL(poll_group != NULL);
209 	CU_ASSERT(poll_group->transport == &transport);
210 	CU_ASSERT(poll_group->buf_cache_size == SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE);
211 	CU_ASSERT(poll_group->buf_cache_count == SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE);
212 
213 	nvmf_transport_poll_group_destroy(poll_group);
214 	spdk_mempool_free(transport.data_buf_pool);
215 
216 	/* Mempool members insufficient */
217 	transport.data_buf_pool = spdk_mempool_create("buf_pool", 31, 4096, 0, 0);
218 
219 	poll_group = nvmf_transport_poll_group_create(&transport);
220 	SPDK_CU_ASSERT_FATAL(poll_group != NULL);
221 	CU_ASSERT(poll_group->transport == &transport);
222 	CU_ASSERT(poll_group->buf_cache_size == 31);
223 	CU_ASSERT(poll_group->buf_cache_count == 31);
224 
225 	nvmf_transport_poll_group_destroy(poll_group);
226 	spdk_mempool_free(transport.data_buf_pool);
227 }
228 
229 int main(int argc, char **argv)
230 {
231 	CU_pSuite	suite = NULL;
232 	unsigned int	num_failures;
233 
234 	CU_set_error_action(CUEA_ABORT);
235 	CU_initialize_registry();
236 
237 	suite = CU_add_suite("nvmf", NULL, NULL);
238 
239 	CU_ADD_TEST(suite, test_spdk_nvmf_transport_create);
240 	CU_ADD_TEST(suite, test_nvmf_transport_poll_group_create);
241 
242 	CU_basic_set_mode(CU_BRM_VERBOSE);
243 	CU_basic_run_tests();
244 	num_failures = CU_get_number_of_failures();
245 	CU_cleanup_registry();
246 	return num_failures;
247 }
248