/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"
#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

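/*
 * The sources under test are compiled directly into this binary so that
 * static functions and internal structures in nvmf/tcp.c (such as
 * struct spdk_nvmf_tcp_transport and struct nvme_tcp_qpair) are visible
 * to the test cases below.
 */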
#include "common/lib/test_env.c"
#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

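/* Test constants: listen address plus the transport option values used below. */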
#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)

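/*
 * Stub out nvmf library dependencies that tcp.c and ctrlr.c link against
 * but that these tests never exercise.  DEFINE_STUB() gives each symbol a
 * mock implementation that returns the listed default value.
 */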
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize,
	    int,
	    (struct spdk_nvmf_qpair *qpair),
	    0);

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, struct iovec *iov, uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB_V(spdk_nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_request_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1);

DEFINE_STUB(spdk_nvmf_request_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    -1);

struct spdk_trace_histories *g_trace_histories;

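/* Minimal bdev definition; only the block count is consumed by the
 * identify-namespace stub further below.
 */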
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

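/*
 * The remaining dependencies are stubbed by hand, either because a bare
 * no-op is sufficient or because the test needs a small amount of real
 * behavior (for example, filling in namespace identify data).
 */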
int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

void
spdk_trace_register_object(uint8_t type, char id_prefix)
{
}

void
spdk_trace_register_description(const char *name, const char *short_name,
				uint16_t tpoint_id, uint8_t owner_type,
				uint8_t object_type, uint8_t new_object,
				uint8_t arg1_is_ptr, const char *arg1_name)
{
}

void
_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		   uint32_t size, uint64_t object_id, uint64_t arg1)
{
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

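/* Report a namespace whose size matches the mock bdev's block count, using
 * a single 512-byte LBA format.
 */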
void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

void
spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
{
}

void
spdk_nvmf_request_exec(struct spdk_nvmf_request *req)
{
}

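/*
 * Exercise spdk_nvmf_tcp_create() with three sets of transport options:
 * valid values, an io_unit_size larger than max_io_size (expected to be
 * clamped down to max_io_size), and an io_unit_size too small to be usable
 * (expected to fail).
 */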
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* Case 1: valid options */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	spdk_io_device_unregister(ttransport, NULL);
	free(ttransport);

	/* Case 2: io_unit_size larger than max_io_size */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success: io_unit_size should be clamped to max_io_size */
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	spdk_io_device_unregister(ttransport, NULL);
	free(ttransport);

	/* Case 3: io_unit_size too small */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
}

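/* Create a transport with valid options and verify that it can be torn
 * down cleanly through spdk_nvmf_tcp_destroy().
 */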
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(spdk_nvmf_tcp_destroy(transport) == 0);

	spdk_thread_exit(thread);
}

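/* Verify that a poll group can be created for the transport and destroyed
 * again without error.
 */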
static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	group = spdk_nvmf_tcp_poll_group_create(transport);
	SPDK_CU_ASSERT_FATAL(group);
	group->transport = transport;
	spdk_nvmf_tcp_poll_group_destroy(group);

	spdk_thread_exit(thread);
}

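/*
 * A TCP qpair is expected to be idle only when every request slot sits in
 * TCP_REQUEST_STATE_FREE, i.e.
 *
 *	idle == (state_cntr[TCP_REQUEST_STATE_FREE] == max_queue_depth)
 *
 * The cases below check both boundaries (0/0 and full/full) as well as
 * partially used qpairs.
 */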
static void
test_nvmf_tcp_qpair_is_idle(void)
{
	struct nvme_tcp_qpair tqpair;

	memset(&tqpair, 0, sizeof(tqpair));

	/* Case 1: zero-depth qpair with no free requests is idle */
	tqpair.max_queue_depth = 0;
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE] = 0;
	CU_ASSERT(spdk_nvmf_tcp_qpair_is_idle(&tqpair.qpair) == true);

	/* Case 2: no free requests, so every slot is in use */
	tqpair.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE] = 0;
	CU_ASSERT(spdk_nvmf_tcp_qpair_is_idle(&tqpair.qpair) == false);

	/* Case 3: only some requests are free */
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE] = 1;
	CU_ASSERT(spdk_nvmf_tcp_qpair_is_idle(&tqpair.qpair) == false);

	/* Case 4: all requests are free */
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE] = UT_MAX_QUEUE_DEPTH;
	CU_ASSERT(spdk_nvmf_tcp_qpair_is_idle(&tqpair.qpair) == true);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "nvmf_tcp_create", test_nvmf_tcp_create) == NULL ||
		CU_add_test(suite, "nvmf_tcp_destroy", test_nvmf_tcp_destroy) == NULL ||
		CU_add_test(suite, "nvmf_tcp_poll_group_create", test_nvmf_tcp_poll_group_create) == NULL ||
		CU_add_test(suite, "nvmf_tcp_qpair_is_idle", test_nvmf_tcp_qpair_is_idle) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}