/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2021 Intel Corporation. All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "common/lib/ut_multithread.c"
#include "common/lib/test_iobuf.c"
#include "nvmf/transport.c"
#include "nvmf/rdma.c"
#include "common/lib/test_rdma.c"

SPDK_LOG_REGISTER_COMPONENT(nvmf)

#define RDMA_UT_UNITS_IN_MAX_IO 16
#define SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE 32

struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
	.max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
	.max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
	.in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
	.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
	.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
	.max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
	.num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
	.opts_size = sizeof(g_rdma_ut_transport_opts)
};

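/*
 * Stub out the symbols that transport.c and rdma.c (compiled into this test via the
 * #includes above) expect from the rest of the nvmf library and from
 * libibverbs/librdmacm, as well as the ut_* callbacks wired into the test transport ops.
 */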
DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
		const struct spdk_nvme_transport_id *trid2), 0);
DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
		struct spdk_dif_ctx *dif_ctx), false);
DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);
DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));
DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
DEFINE_STUB(ut_transport_destroy, int, (struct spdk_nvmf_transport *transport,
					spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(ibv_get_device_name, const char *, (struct ibv_device *device), NULL);
DEFINE_STUB(ibv_query_qp, int, (struct ibv_qp *qp, struct ibv_qp_attr *attr,
				int attr_mask,
				struct ibv_qp_init_attr *init_attr), 0);
DEFINE_STUB(rdma_create_id, int, (struct rdma_event_channel *channel,
				  struct rdma_cm_id **id, void *context,
				  enum rdma_port_space ps), 0);
DEFINE_STUB(rdma_bind_addr, int, (struct rdma_cm_id *id, struct sockaddr *addr), 0);
DEFINE_STUB(rdma_listen, int, (struct rdma_cm_id *id, int backlog), 0);
DEFINE_STUB(rdma_destroy_id, int, (struct rdma_cm_id *id), 0);
DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
DEFINE_STUB(rdma_reject, int, (struct rdma_cm_id *id,
			       const void *private_data, uint8_t private_data_len), 0);
DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
DEFINE_STUB_V(rdma_destroy_qp, (struct rdma_cm_id *id));
DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
DEFINE_STUB(ibv_dealloc_pd, int, (struct ibv_pd *pd), 0);
DEFINE_STUB(rdma_create_event_channel, struct rdma_event_channel *, (void), NULL);
DEFINE_STUB(rdma_get_devices, struct ibv_context **, (int *num_devices), NULL);
DEFINE_STUB(ibv_query_device, int, (struct ibv_context *context,
				    struct ibv_device_attr *device_attr), 0);
DEFINE_STUB(ibv_alloc_pd, struct ibv_pd *, (struct ibv_context *context), NULL);
DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
DEFINE_STUB(ibv_get_async_event, int, (struct ibv_context *context, struct ibv_async_event *event),
	    0);
DEFINE_STUB(ibv_event_type_str, const char *, (enum ibv_event_type event_type), NULL);
DEFINE_STUB_V(ibv_ack_async_event, (struct ibv_async_event *event));
DEFINE_STUB(rdma_get_cm_event, int, (struct rdma_event_channel *channel,
				     struct rdma_cm_event **event), 0);
DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe,
		void *cq_context,
		struct ibv_comp_channel *channel,
		int comp_vector), NULL);
DEFINE_STUB(ibv_wc_status_str, const char *, (enum ibv_wc_status status), NULL);
DEFINE_STUB(rdma_get_dst_port, __be16, (struct rdma_cm_id *id), 0);
DEFINE_STUB(rdma_get_src_port, __be16, (struct rdma_cm_id *id), 0);
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid, int, (struct spdk_nvmf_qpair *qpair,
		struct spdk_nvme_transport_id *trid), 0);
DEFINE_STUB(spdk_nvme_transport_id_adrfam_str, const char *, (enum spdk_nvmf_adrfam adrfam), NULL);
DEFINE_STUB_V(ut_opts_init, (struct spdk_nvmf_transport_opts *opts));
DEFINE_STUB(ut_transport_listen, int, (struct spdk_nvmf_transport *transport,
				       const struct spdk_nvme_transport_id *trid, struct spdk_nvmf_listen_opts *opts), 0);
DEFINE_STUB_V(ut_transport_stop_listen, (struct spdk_nvmf_transport *transport,
		const struct spdk_nvme_transport_id *trid));
DEFINE_STUB(spdk_mempool_lookup, struct spdk_mempool *, (const char *name), NULL);
DEFINE_STUB(spdk_rdma_cm_id_get_numa_id, int32_t, (struct rdma_cm_id *cm_id), 0);

/* ibv_reg_mr can be a macro, need to undefine it */
#ifdef ibv_reg_mr
#undef ibv_reg_mr
#endif

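/*
 * Mocked ibv_reg_mr: unless a return value is forced with MOCK_SET, return the shared
 * test MR (g_rdma_mr) for any non-zero length and NULL for a zero-length registration.
 */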
DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
struct ibv_mr *
ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
{
	HANDLE_RETURN_MOCK(ibv_reg_mr);
	if (length > 0) {
		return &g_rdma_mr;
	} else {
		return NULL;
	}
}

struct spdk_nvmf_transport ut_transport = {};

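/* Stubbed create_async callback: completes immediately with the static ut_transport instance. */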
static int
ut_transport_create(struct spdk_nvmf_transport_opts *opts, spdk_nvmf_transport_create_done_cb cb_fn,
		    void *cb_arg)
{
	cb_fn(cb_arg, &ut_transport);
	return 0;
}

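/* Create-done callback shared by the tests below: stores the new transport in *cb_arg. */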
static void
test_nvmf_create_transport_done(void *cb_arg, struct spdk_nvmf_transport *transport)
{
	struct spdk_nvmf_transport **ctx = cb_arg;

	*ctx = transport;
}

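/*
 * Exercise spdk_nvmf_transport_create_async(): creation fails when no matching ops are
 * registered or when the opts are invalid (io_unit_size of 0, io_unit_size larger than
 * the iobuf large buffer size, an unacceptable max_io_size), and succeeds with valid
 * opts; the test then destroys the transport and unregisters its ops.
 */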
static void
test_spdk_nvmf_transport_create(void)
{
	int rc;
	struct spdk_nvmf_transport *transport = NULL;
	struct nvmf_transport_ops_list_element *ops_element;
	struct spdk_iobuf_opts opts_iobuf = {};
	struct spdk_nvmf_transport_ops ops = {
		.name = "new_ops",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create_async = ut_transport_create,
		.destroy = ut_transport_destroy
	};

	opts_iobuf.large_bufsize = 0x10000;
	opts_iobuf.large_pool_count = 4096;
	opts_iobuf.small_bufsize = 0x1000;
	opts_iobuf.small_pool_count = 4096;

	rc = spdk_iobuf_set_opts(&opts_iobuf);
	CU_ASSERT(rc == 0);

	/* No transport ops registered for this name yet */
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);

	spdk_nvmf_transport_register(&ops);

	/* Ensure io_unit_size cannot be set to 0 */
	g_rdma_ut_transport_opts.io_unit_size = 0;
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);

	/* Ensure io_unit_size cannot be larger than large_bufsize */
	g_rdma_ut_transport_opts.io_unit_size = opts_iobuf.large_bufsize * 2;
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);

	g_rdma_ut_transport_opts.io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE;

	/* Create transport successfully */
	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc == 0);
	CU_ASSERT(transport == &ut_transport);
	CU_ASSERT(!memcmp(&transport->opts, &g_rdma_ut_transport_opts, sizeof(g_rdma_ut_transport_opts)));
	CU_ASSERT(!memcmp(transport->ops, &ops, sizeof(ops)));

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);

	/* Invalid transport_opts parameter */
	transport = NULL;
	g_rdma_ut_transport_opts.max_io_size = 4096;

	rc = spdk_nvmf_transport_create_async("new_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc != 0);
	CU_ASSERT(transport == NULL);
	g_rdma_ut_transport_opts.max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE *
						RDMA_UT_UNITS_IN_MAX_IO);

	ops_element = TAILQ_LAST(&g_spdk_nvmf_transport_ops, nvmf_transport_ops_list);
	TAILQ_REMOVE(&g_spdk_nvmf_transport_ops, ops_element, link);
	free(ops_element);
}

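/* Minimal poll group callbacks for the poll group test: allocate and free an empty group. */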
static struct spdk_nvmf_transport_poll_group *
ut_poll_group_create(struct spdk_nvmf_transport *transport,
		     struct spdk_nvmf_poll_group *group)
{
	struct spdk_nvmf_transport_poll_group *tgroup;

	tgroup = calloc(1, sizeof(*tgroup));
	SPDK_CU_ASSERT_FATAL(tgroup != NULL);
	return tgroup;
}

static void
ut_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group)
{
	free(group);
}

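/*
 * Create and destroy a transport poll group twice through nvmf_transport_poll_group_create()
 * and nvmf_transport_poll_group_destroy(), verifying each group points back at its transport.
 */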
static void
test_nvmf_transport_poll_group_create(void)
{
	struct spdk_nvmf_transport_poll_group *poll_group = NULL;
	struct spdk_nvmf_transport transport = {};
	struct spdk_nvmf_transport_ops ops = {};

	ops.poll_group_create = ut_poll_group_create;
	ops.poll_group_destroy = ut_poll_group_destroy;
	transport.ops = &ops;
	transport.opts.buf_cache_size = SPDK_NVMF_DEFAULT_BUFFER_CACHE_SIZE;

	poll_group = nvmf_transport_poll_group_create(&transport, NULL);
	SPDK_CU_ASSERT_FATAL(poll_group != NULL);
	CU_ASSERT(poll_group->transport == &transport);

	nvmf_transport_poll_group_destroy(poll_group);

	poll_group = nvmf_transport_poll_group_create(&transport, NULL);
	SPDK_CU_ASSERT_FATAL(poll_group != NULL);
	CU_ASSERT(poll_group->transport == &transport);

	nvmf_transport_poll_group_destroy(poll_group);
}

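/*
 * Validate spdk_nvmf_transport_opts_init(): it must reject an unregistered transport
 * name, a NULL opts pointer, and an opts_size of 0; on success it records the
 * opts_size that was passed in.
 */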
static void
test_spdk_nvmf_transport_opts_init(void)
{
	int rc;
	bool rcbool;
	size_t opts_size;
	struct spdk_nvmf_transport *transport = NULL;
	struct spdk_nvmf_transport_opts opts = {};
	const struct spdk_nvmf_transport_ops *tops;
	struct spdk_nvmf_transport_ops ops = {
		.name = "ut_ops",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create_async = ut_transport_create,
		.destroy = ut_transport_destroy,
		.opts_init = ut_opts_init
	};

	spdk_nvmf_transport_register(&ops);
	rc = spdk_nvmf_transport_create_async("ut_ops", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc == 0);
	CU_ASSERT(transport == &ut_transport);

	tops = nvmf_get_transport_ops(ops.name);
	CU_ASSERT(memcmp(tops, &ops, sizeof(struct spdk_nvmf_transport_ops)) == 0);

	/* Test1: Invalid parameter: transport name that was never registered */
	opts_size = sizeof(struct spdk_nvmf_transport_opts);

	rcbool = spdk_nvmf_transport_opts_init("invalid_ops", &opts, opts_size);
	CU_ASSERT(rcbool == false);

	/* Test2: Invalid parameter: NULL opts pointer */
	rcbool = true;

	rcbool = spdk_nvmf_transport_opts_init(ops.name, NULL, opts_size);
	CU_ASSERT(rcbool == false);

	/* Test3: Invalid parameter: opts_size of zero */
	rcbool = true;
	opts_size = 0;

	rcbool = spdk_nvmf_transport_opts_init(ops.name, &opts, opts_size);
	CU_ASSERT(rcbool == false);

	/* Test4: Success */
	opts.opts_size = 0;
	opts_size = sizeof(struct spdk_nvmf_transport_opts);

	rcbool = spdk_nvmf_transport_opts_init(ops.name, &opts, opts_size);
	CU_ASSERT(rcbool == true);
	CU_ASSERT(opts.opts_size == opts_size);

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);
}

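/*
 * Cover spdk_nvmf_transport_listen() and spdk_nvmf_transport_stop_listen(): a failing
 * listen callback leaves no listener registered, listening twice on the same transport
 * ID bumps the listener refcount, stop_listen drops the refcount and frees the listener
 * once it reaches zero, and stopping an unknown ID returns -ENOENT.
 */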
static void
test_spdk_nvmf_transport_listen_ext(void)
{
	int rc;
	struct spdk_nvmf_transport *transport = NULL;
	struct spdk_nvme_transport_id trid1 = {};
	struct spdk_nvme_transport_id trid2 = {};
	struct spdk_nvmf_listen_opts lopts = {};
	struct spdk_nvmf_listener *tlistener;
	struct spdk_nvmf_transport_ops ops = {
		.name = "ut_ops1",
		.type = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA,
		.create_async = ut_transport_create,
		.destroy = ut_transport_destroy,
		.opts_init = ut_opts_init,
		.listen = ut_transport_listen,
		.stop_listen = ut_transport_stop_listen
	};

	trid1.trtype = (enum spdk_nvme_transport_type)SPDK_NVMF_TRTYPE_RDMA;
	trid1.adrfam = (enum spdk_nvmf_adrfam)SPDK_NVMF_ADRFAM_IPV4;
	trid1.priority = 4;
	memcpy(trid1.traddr, "192.168.100.72", sizeof("192.168.100.72"));
	memcpy(trid1.trsvcid, "4420", sizeof("4420"));

	spdk_nvmf_transport_register(&ops);
	rc = spdk_nvmf_transport_create_async("ut_ops1", &g_rdma_ut_transport_opts,
					      test_nvmf_create_transport_done, &transport);
	CU_ASSERT(rc == 0);
	CU_ASSERT(transport == &ut_transport);

	/* Test1: The transport's listen callback fails */
	MOCK_SET(ut_transport_listen, -1);

	rc = spdk_nvmf_transport_listen(transport, &trid1, &lopts);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == -1);
	CU_ASSERT(tlistener == NULL);

	/* Test2: The transport's listen callback succeeds */
	MOCK_SET(ut_transport_listen, 0);

	rc = spdk_nvmf_transport_listen(transport, &trid1, &lopts);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener != NULL);
	CU_ASSERT(tlistener->ref == 1);
	CU_ASSERT(memcmp(&tlistener->trid, &trid1, sizeof(trid1)) == 0);

	/* Test3: Listen on the same transport ID again; the refcount increases */
	tlistener = NULL;

	rc = spdk_nvmf_transport_listen(transport, &trid1, &lopts);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener != NULL);
	CU_ASSERT(tlistener->ref == 2);
	CU_ASSERT(memcmp(&tlistener->trid, &trid1, sizeof(trid1)) == 0);

	/* Test4: Stop listening while ref > 1; the listener is not released */
	tlistener = NULL;

	rc = spdk_nvmf_transport_stop_listen(transport, &trid1);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener != NULL);
	CU_ASSERT(tlistener->ref == 1);
	CU_ASSERT(memcmp(&tlistener->trid, &trid1, sizeof(trid1)) == 0);

	/* Test5: Stop listening when ref == 1; the listener is released */

	rc = spdk_nvmf_transport_stop_listen(transport, &trid1);
	tlistener = nvmf_transport_find_listener(transport, &trid1);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tlistener == NULL);

	/* Test6: Stop listening on a transport ID that was never registered */
	rc = spdk_nvmf_transport_stop_listen(transport, &trid2);

	CU_ASSERT(rc == -ENOENT);

	rc = spdk_nvmf_transport_destroy(transport, NULL, NULL);
	CU_ASSERT(rc == 0);
}

int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_spdk_nvmf_transport_create);
	CU_ADD_TEST(suite, test_nvmf_transport_poll_group_create);
	CU_ADD_TEST(suite, test_spdk_nvmf_transport_opts_init);
	CU_ADD_TEST(suite, test_spdk_nvmf_transport_listen_ext);

	allocate_threads(1);
	set_thread(0);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}