/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"
#include "spdk_internal/thread.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

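/* Unit-test constants: addresses, queue limits, and buffer sizes. */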
#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)

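/*
 * Stubs for the external symbols referenced by the nvmf/ctrlr.c and
 * nvmf/tcp.c compilation units included above, so the TCP transport can be
 * exercised in isolation.
 */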
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

struct spdk_trace_histories *g_trace_histories;

/* Minimal stand-in for struct spdk_bdev; only blockcnt is read, by
 * nvmf_bdev_ctrlr_identify_ns() below. */
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

/* Treat all transport IDs as equal; these tests never distinguish them. */
int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

/* Tracepoint registration and recording are no-ops in the unit test. */
void
spdk_trace_register_object(uint8_t type, char id_prefix)
{
}

void
spdk_trace_register_description(const char *name,
				uint16_t tpoint_id, uint8_t owner_type,
				uint8_t object_type, uint8_t new_object,
				uint8_t arg1_type, const char *arg1_name)
{
}

void
_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
		   uint32_t size, uint64_t object_id, uint64_t arg1)
{
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Store the official trstring as the uppercased version of the input. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Fail for lengths of one io unit or more, so such requests stay
	 * queued waiting for a buffer. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

void
spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
{
}

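/*
 * Exercise nvmf_tcp_create(): valid opts succeed, an io_unit_size larger
 * than max_io_size is capped, and an io_unit_size that is too small makes
 * creation fail.
 */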
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* Case 1: all opts valid; expect creation to succeed. */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* Destroy the transport. */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* Case 2: io_unit_size larger than max_io_size; expect success, with
	 * io_unit_size capped to max_io_size. */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* Destroy the transport. */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	free(ttransport);

	/* Case 3: io_unit_size too small; expect failure. */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

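/* Create a transport with valid opts and verify nvmf_tcp_destroy() returns 0. */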
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* Destroying the transport should succeed. */
	CU_ASSERT(nvmf_tcp_destroy(transport) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

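/*
 * Create and destroy a TCP poll group, mocking spdk_sock_group_create() so
 * no real socket group is required.
 */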
static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

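/*
 * Build a 300-byte response spread over three iovecs and verify that
 * nvmf_tcp_send_c2h_data() queues a single C2H DATA PDU whose offsets,
 * lengths, and LAST_PDU flag match the request.
 */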
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;

	tqpair.qpair.transport = &ttransport.transport;
	TAILQ_INIT(&tqpair.send_queue);

	/* Set qpair state to make unrelated operations NOPs. */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

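/*
 * Feed an H2C DATA header that matches an outstanding request (by cccid and
 * ttag) and verify the PDU payload is mapped onto that request's iovecs.
 */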
static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);

	/* Set qpair state to make unrelated operations NOPs. */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

	CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
		  &tcp_req);
	TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
		     &tcp_req, state_link);
}

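/*
 * Verify in-capsule data handling: a request already parked on
 * pending_buf_queue keeps its place at the head, while a new capsule command
 * whose in-capsule data fits within one io unit is matched to a free tcp_req
 * without being queued for a buffer.
 */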
static void
test_nvmf_tcp_incapsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu;
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};
	int i = 0;

	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	/* Init tqpair; its in-progress PDU will end up waiting for a data buffer. */
	for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
		TAILQ_INIT(&tqpair.state_queue[i]);
	}

	TAILQ_INIT(&tqpair.send_queue);

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;

	/* Init tcp_req2, which sits on the tqpair's TCP_REQUEST_STATE_FREE queue. */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* Init tcp_req1. */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Init the PDU so that it requires an SGL data buffer. */
	pdu = &tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Processing tcp_req1 parks it on pending_buf_queue; it takes
	 * precedence over later requests. */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the tqpair's capsule command; tcp_req1 must remain at the
	 * head of pending_buf_queue and tcp_req2 must not be queued there. */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
}

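/*
 * Register and run the suite. The process exit status is the number of CU
 * failures, so zero means every assertion passed. (These binaries are
 * normally driven by the SPDK unit-test harness, e.g. test/unit/unittest.sh.)
 */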
int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_incapsule_data_handle);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}