/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)

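/*
 * The DEFINE_STUB()/DEFINE_STUB_V() macros from spdk_internal/mock.h generate
 * mock implementations for symbols that the compiled-in nvmf/ctrlr.c and
 * nvmf/tcp.c reference but that these tests do not exercise.  Each stub
 * returns the fixed default value given as its last argument (DEFINE_STUB_V
 * generates an empty void function); the default can be overridden at run
 * time with MOCK_SET(), as test_nvmf_tcp_poll_group_create() does below.
 */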
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(spdk_nvmf_transport_qpair_set_sqsize,
	    int,
	    (struct spdk_nvmf_qpair *qpair),
	    0);

DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB_V(spdk_nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(spdk_nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(spdk_nvmf_ns_reservation_request, (void *ctx));

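/*
 * The definitions below are hand-written mocks: some need real behavior
 * (e.g. spdk_nvmf_request_get_buffers() and
 * spdk_nvmf_bdev_ctrlr_identify_ns()), while the rest have empty or trivial
 * bodies.  struct spdk_bdev is likewise redefined as a minimal stand-in
 * carrying only the fields these tests touch.
 */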
struct spdk_trace_histories *g_trace_histories;

struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

void
spdk_trace_register_object(uint8_t type, char id_prefix)
{
}

void
spdk_trace_register_description(const char *name,
				uint16_t tpoint_id, uint8_t owner_type,
				uint8_t object_type, uint8_t new_object,
				uint8_t arg1_type, const char *arg1_name)
{
}

void
_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		   uint32_t size, uint64_t object_id, uint64_t arg1)
{
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

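/*
 * Buffer allocation mock: any request of io_unit_size bytes or more fails
 * with -EINVAL, which lets the in-capsule test below park a request in
 * pending_buf_queue; smaller requests get a single dummy iovec.
 */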
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Requests of one io_unit_size or more are made to fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

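/*
 * Minimal identify-namespace mock: report the bdev's block count for the
 * size/capacity/utilization fields and a single 512-byte LBA format.
 */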
void
spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
				 bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

void
spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
{
}

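/*
 * spdk_nvmf_tcp_create() is exercised with three sets of options: valid
 * options (case 1), an io_unit_size larger than max_io_size, which the
 * transport is expected to cap at max_io_size (case 2), and an io_unit_size
 * too small to be usable, which is expected to fail (case 3).
 */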
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* case 2 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success: io_unit_size is capped at max_io_size */
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* case 3 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
}

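/*
 * spdk_nvmf_tcp_destroy() should tear down a transport created with valid
 * options and return 0.
 */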
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(spdk_nvmf_tcp_destroy(transport) == 0);

	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
}

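/*
 * Poll group creation needs a socket group, so spdk_sock_group_create() is
 * pointed at a stack-allocated struct spdk_sock_group via MOCK_SET() for the
 * duration of the call.
 */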
static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = spdk_nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = spdk_nvmf_tcp_poll_group_create(transport);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	group->transport = transport;
	spdk_nvmf_tcp_poll_group_destroy(group);
	spdk_nvmf_tcp_destroy(transport);

	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
}

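/*
 * The request carries three iovecs of NVMF_TCP_PDU_MAX_C2H_DATA_SIZE each,
 * and the transfer starts at an offset of half a PDU, so the data should go
 * out as three C2H PDUs: two full-sized ones that each straddle an iovec
 * boundary, then a final half-sized PDU flagged with
 * SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU.
 */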
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	pdu.hdr = &pdu.hdr_mem;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tqpair.qpair.transport = &ttransport.transport;
	TAILQ_INIT(&tqpair.free_queue);
	TAILQ_INIT(&tqpair.send_queue);
	STAILQ_INIT(&tqpair.queued_c2h_data_tcp_req);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	TAILQ_INSERT_TAIL(&tqpair.free_queue, &pdu, tailq);
	tqpair.free_pdu_num++;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE * 3;

	CU_ASSERT(spdk_nvmf_tcp_calc_c2h_data_pdu_num(&tcp_req) == 3);

	STAILQ_INSERT_TAIL(&tqpair.queued_c2h_data_tcp_req, &tcp_req, link);

	tcp_req.c2h_data_offset = NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2;

	/* 1st C2H */
	spdk_nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	TAILQ_INSERT_TAIL(&tqpair.free_queue, &pdu, tailq);
	tqpair.free_pdu_num++;

	c2h_data = &pdu.hdr->c2h_data;
	CU_ASSERT(c2h_data->datao == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT(c2h_data->datal == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
	CU_ASSERT(!(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU));

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT(pdu.data_iov[0].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);

	CU_ASSERT(tcp_req.c2h_data_offset == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 3);
	CU_ASSERT(STAILQ_FIRST(&tqpair.queued_c2h_data_tcp_req) == &tcp_req);

	/* 2nd C2H */
	spdk_nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	TAILQ_INSERT_TAIL(&tqpair.free_queue, &pdu, tailq);
	tqpair.free_pdu_num++;

	c2h_data = &pdu.hdr->c2h_data;
	CU_ASSERT(c2h_data->datao == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 3);
	CU_ASSERT(c2h_data->datal == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE);
	CU_ASSERT(!(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU));

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT(pdu.data_iov[0].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[1].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);

	CU_ASSERT(tcp_req.c2h_data_offset == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 5);
	CU_ASSERT(STAILQ_FIRST(&tqpair.queued_c2h_data_tcp_req) == &tcp_req);

	/* 3rd C2H */
	spdk_nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue));

	c2h_data = &pdu.hdr->c2h_data;
	CU_ASSERT(c2h_data->datao == (NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2) * 5);
	CU_ASSERT(c2h_data->datal == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);

	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xC0FFEE + NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);
	CU_ASSERT(pdu.data_iov[0].iov_len == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE / 2);

	CU_ASSERT(tcp_req.c2h_data_offset == NVMF_TCP_PDU_MAX_C2H_DATA_SIZE * 3);
	CU_ASSERT(tqpair.c2h_data_pdu_cnt == 3);
	CU_ASSERT(STAILQ_EMPTY(&tqpair.queued_c2h_data_tcp_req));

	spdk_thread_exit(thread);
	spdk_thread_destroy(thread);
}

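/*
 * A request in TRANSFERRING_HOST_TO_CONTROLLER is matched against an
 * incoming H2C data header by cccid and ttag; the pdu's data iovecs should
 * then be built from the request's iovecs at the current R2T offset.
 */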
static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	pdu.hdr = &pdu.hdr_mem;
	TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);
	tqpair.maxh2cdata = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = (NVMF_TCP_PDU_MAX_H2C_DATA_SIZE / 2) * 5;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE / 2;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE * 3;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;
	tcp_req.next_expected_r2t_offset = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE * 2;

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr->h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE * 2;
	h2c_data->datal = NVMF_TCP_PDU_MAX_H2C_DATA_SIZE;

	spdk_nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + NVMF_TCP_PDU_MAX_H2C_DATA_SIZE * 2);
	CU_ASSERT(pdu.data_iov[0].iov_len == NVMF_TCP_PDU_MAX_H2C_DATA_SIZE / 2);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == NVMF_TCP_PDU_MAX_H2C_DATA_SIZE / 2);

	CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
		  &tcp_req);
	TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
		     &tcp_req, state_link);
}

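/*
 * In-capsule data handling: tcp_req1 is parked in pending_buf_queue by the
 * buffer-allocation mock above (its SGL asks for a full io_unit_size).  A
 * capsule command then arrives for tcp_req2; it should end up attached to
 * pdu_in_progress awaiting its payload rather than entering
 * pending_buf_queue, and tcp_req1 should stay at the head of that queue.
 */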
static void
test_nvmf_tcp_incapsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};
	union nvme_tcp_pdu_hdr hdr = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};
	int i = 0;

	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	/* Initialize the tqpair; the pdu built below is copied into
	 * pdu_in_progress and will wait for a buffer. */
	pdu.hdr = &pdu.hdr_mem;
	for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
		TAILQ_INIT(&tqpair.state_queue[i]);
	}
	TAILQ_INIT(&tqpair.free_queue);
	TAILQ_INIT(&tqpair.send_queue);
	STAILQ_INIT(&tqpair.queued_c2h_data_tcp_req);

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.pdu_in_progress.hdr = &hdr;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;

	/* Initialize tcp_req2, the request sitting on the tqpair's
	 * TCP_REQUEST_STATE_FREE queue. */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* Initialize tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the pdu so that it requires an SGL buffer */
	capsule_data = &pdu.hdr->capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu.hdr->capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl = &pdu.hdr->capsule_cmd.ccsqe.dptr.sgl1;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
	tqpair.pdu_in_progress = pdu;

	/* Insert tcp_req1 into pending_buf_queue; it takes precedence over the
	 * request that follows. */
	spdk_nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair's capsule command; tcp_req1 must remain in
	 * pending_buf_queue and tcp_req2 must not enter it. */
	spdk_nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvmf", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "nvmf_tcp_create", test_nvmf_tcp_create) == NULL ||
		CU_add_test(suite, "nvmf_tcp_destroy", test_nvmf_tcp_destroy) == NULL ||
		CU_add_test(suite, "nvmf_tcp_poll_group_create", test_nvmf_tcp_poll_group_create) == NULL ||
		CU_add_test(suite, "nvmf_tcp_send_c2h_data", test_nvmf_tcp_send_c2h_data) == NULL ||
		CU_add_test(suite, "nvmf_tcp_h2c_data_hdr_handle", test_nvmf_tcp_h2c_data_hdr_handle) == NULL ||
		CU_add_test(suite, "nvmf_tcp_incapsule_test", test_nvmf_tcp_incapsule_data_handle) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}