/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/nvmf_spec.h"
#include "spdk_cunit.h"

#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "common/lib/test_sock.c"

#include "nvmf/ctrlr.c"
#include "nvmf/tcp.c"

#define UT_IPV4_ADDR "192.168.0.1"
#define UT_PORT "4420"
#define UT_NVMF_ADRFAM_INVALID 0xf
#define UT_MAX_QUEUE_DEPTH 128
#define UT_MAX_QPAIRS_PER_CTRLR 128
#define UT_IN_CAPSULE_DATA_SIZE 1024
#define UT_MAX_IO_SIZE 4096
#define UT_IO_UNIT_SIZE 1024
#define UT_MAX_AQ_DEPTH 64
#define UT_SQ_HEAD_MAX 128
#define UT_NUM_SHARED_BUFFERS 128

static void *g_accel_p = (void *)0xdeadbeaf;

SPDK_LOG_REGISTER_COMPONENT(nvmf)

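/*
 * The stubs below satisfy the symbols that ctrlr.c and tcp.c pull in from the
 * rest of the nvmf library, so the transport code under test can be linked in
 * isolation.  DEFINE_STUB supplies a function returning a fixed value;
 * DEFINE_STUB_V defines a void function with an empty body.
 */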
DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
	    int,
	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
	    0);

DEFINE_STUB(nvmf_subsystem_add_ctrlr,
	    int,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
	    0);

DEFINE_STUB(nvmf_subsystem_get_ctrlr,
	    struct spdk_nvmf_ctrlr *,
	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
	    NULL);

DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
	    struct spdk_nvmf_subsystem *,
	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
	    true);

DEFINE_STUB(nvmf_subsystem_find_listener,
	    struct spdk_nvmf_subsystem_listener *,
	    (struct spdk_nvmf_subsystem *subsystem,
	     const struct spdk_nvme_transport_id *trid),
	    (void *)0x1);

DEFINE_STUB_V(nvmf_get_discovery_log_page,
	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));

DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));

DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
	    struct spdk_nvmf_ns *,
	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
	    NULL);

DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
	    bool,
	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
	    true);

DEFINE_STUB(nvmf_ctrlr_dsm_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
	    bool,
	    (struct spdk_nvmf_ctrlr *ctrlr),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
	    0);

DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
	    bool,
	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
	    false);

DEFINE_STUB(nvmf_transport_req_complete,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(nvmf_bdev_zcopy_enabled,
	    bool,
	    (struct spdk_bdev *bdev),
	    false);

DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
	     struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));

DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
	       struct spdk_nvmf_transport *transport));

DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_sock_set_priority,
	    int,
	    (struct spdk_sock *sock, int priority),
	    0);

DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));

DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
		enum spdk_nvme_transport_type trtype));

DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));

DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));

DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));

DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));

DEFINE_STUB(nvmf_transport_req_free,
	    int,
	    (struct spdk_nvmf_request *req),
	    0);

DEFINE_STUB(accel_engine_create_cb, int, (void *io_device, void *ctx_buf), 0);
DEFINE_STUB_V(accel_engine_destroy_cb, (void *io_device, void *ctx_buf));

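/* Route accel engine channel requests to the fake io_device registered in init_accel(). */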
struct spdk_io_channel *
spdk_accel_engine_get_io_channel(void)
{
	return spdk_get_io_channel(g_accel_p);
}

DEFINE_STUB(spdk_accel_submit_crc32cv,
	    int,
	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
	    0);

DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
	    int,
	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
	    0);

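/* Minimal bdev definition: the tests only need a block count (see nvmf_bdev_ctrlr_identify_ns). */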
struct spdk_bdev {
	int ut_mock;
	uint64_t blockcnt;
};

int
spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
			       const struct spdk_nvme_transport_id *trid2)
{
	return 0;
}

const char *
spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
{
	switch (trtype) {
	case SPDK_NVME_TRANSPORT_PCIE:
		return "PCIe";
	case SPDK_NVME_TRANSPORT_RDMA:
		return "RDMA";
	case SPDK_NVME_TRANSPORT_FC:
		return "FC";
	default:
		return NULL;
	}
}

int
spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
{
	int len, i;

	if (trstring == NULL) {
		return -EINVAL;
	}

	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
		return -EINVAL;
	}

	/* Copy the input trstring, converting it to its uppercase official form. */
	for (i = 0; i < len; i++) {
		trid->trstring[i] = toupper(trstring[i]);
	}
	return 0;
}

int
spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
{
	return 0;
}

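/*
 * Mock buffer allocation: hand back a single fake iovec.  Requests of one
 * io_unit or larger are rejected, which lets the tests exercise the path
 * where a request is left waiting for buffers.
 */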
int
spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
			      struct spdk_nvmf_transport_poll_group *group,
			      struct spdk_nvmf_transport *transport,
			      uint32_t length)
{
	/* Lengths of one io_unit or more fail. */
	if (length >= transport->opts.io_unit_size) {
		return -EINVAL;
	}

	req->iovcnt = 1;
	req->iov[0].iov_base = (void *)0xDEADBEEF;

	return 0;
}

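/* Fill in just enough identify data for the tests: size fields from the bdev block count and a single 512-byte LBA format. */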
void
nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
			    bool dif_insert_or_strip)
{
	uint64_t num_blocks;

	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
	num_blocks = ns->bdev->blockcnt;
	nsdata->nsze = num_blocks;
	nsdata->ncap = num_blocks;
	nsdata->nuse = num_blocks;
	nsdata->nlbaf = 0;
	nsdata->flbas.format = 0;
	nsdata->lbaf[0].lbads = spdk_u32log2(512);
}

const char *
spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->sn;
}

const char *
spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
{
	return subsystem->mn;
}

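/*
 * Verify nvmf_tcp_create(): valid opts succeed, an io_unit_size larger than
 * max_io_size is capped to max_io_size, and a too-small io_unit_size makes
 * creation fail.
 */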
static void
test_nvmf_tcp_create(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_tcp_transport *ttransport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 2: io_unit_size is larger than max_io_size and gets capped to it */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	/* expect success */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
	transport->opts = opts;
	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
	/* destroy transport */
	spdk_mempool_free(ttransport->transport.data_buf_pool);
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	/* case 3: io_unit_size is too small */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = 16;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	/* expect failure */
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NULL(transport);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

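/* Verify that a transport created with valid opts can be torn down cleanly. */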
static void
test_nvmf_tcp_destroy(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_opts opts;

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	/* case 1 */
	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	/* destroy transport */
	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

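/* Register/unregister the fake io_device that spdk_accel_engine_get_io_channel() returns a channel for. */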
static void
init_accel(void)
{
	spdk_io_device_register(g_accel_p, accel_engine_create_cb, accel_engine_destroy_cb,
				sizeof(int), "accel_p");
}

static void
fini_accel(void)
{
	spdk_io_device_unregister(g_accel_p, NULL);
}

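/*
 * Verify nvmf_tcp_poll_group_create(): with a mocked sock group the poll group
 * is created, and a control message list is allocated when in_capsule_data_size
 * is below the TCP in-capsule data maximum.
 */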
static void
test_nvmf_tcp_poll_group_create(void)
{
	struct spdk_nvmf_transport *transport;
	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group *tgroup;
	struct spdk_thread *thread;
	struct spdk_nvmf_transport_opts opts;
	struct spdk_sock_group grp = {};

	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	init_accel();

	memset(&opts, 0, sizeof(opts));
	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
	opts.max_io_size = UT_MAX_IO_SIZE;
	opts.io_unit_size = UT_IO_UNIT_SIZE;
	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
	transport = nvmf_tcp_create(&opts);
	CU_ASSERT_PTR_NOT_NULL(transport);
	transport->opts = opts;
	MOCK_SET(spdk_sock_group_create, &grp);
	group = nvmf_tcp_poll_group_create(transport);
	MOCK_CLEAR_P(spdk_sock_group_create);
	SPDK_CU_ASSERT_FATAL(group);
	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
	}
	group->transport = transport;
	nvmf_tcp_poll_group_destroy(group);
	nvmf_tcp_destroy(transport, NULL, NULL);

	fini_accel();
	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

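/*
 * Verify nvmf_tcp_send_c2h_data(): the C2H DATA PDU carries the request's
 * iovecs unchanged, and the SUCCESS flag is set only when c2h_success is
 * enabled and the completion is clean (cdw0 == 0).
 */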
static void
test_nvmf_tcp_send_c2h_data(void)
{
	struct spdk_thread *thread;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;

	ttransport.tcp_opts.c2h_success = true;
	thread = spdk_thread_create(NULL, NULL);
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	spdk_set_thread(thread);

	tcp_req.pdu = &pdu;
	tcp_req.req.length = 300;
	tcp_req.req.qpair = &tqpair.qpair;

	tqpair.qpair.transport = &ttransport.transport;

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 100;
	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
	tcp_req.req.iov[2].iov_len = 99;
	tcp_req.req.iovcnt = 3;
	tcp_req.req.length = 300;

	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	c2h_data = &pdu.hdr.c2h_data;
	CU_ASSERT(c2h_data->datao == 0);
	CU_ASSERT(c2h_data->datal == 300);
	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);

	CU_ASSERT(pdu.data_iovcnt == 3);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
	CU_ASSERT(pdu.data_iov[2].iov_len == 99);

	/* A non-zero cdw0 must clear the SUCCESS flag even when c2h_success is enabled */
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	/* With c2h_success disabled, the SUCCESS flag must never be set */
	ttransport.tcp_opts.c2h_success = false;
	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 0;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	tcp_req.pdu_in_use = false;
	tcp_req.rsp.cdw0 = 1;
	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);

	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);

	spdk_thread_exit(thread);
	while (!spdk_thread_is_exited(thread)) {
		spdk_thread_poll(thread, 0, 0);
	}
	spdk_thread_destroy(thread);
}

#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)

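/*
 * Verify nvmf_tcp_h2c_data_hdr_handle(): an H2C DATA header whose cccid and
 * ttag match an outstanding request maps the payload onto that request's
 * iovecs.
 */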
static void
test_nvmf_tcp_h2c_data_hdr_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;

	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	/* Set qpair state to make unrelated operations NOP */
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;

	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
	tcp_req.req.iov[0].iov_len = 101;
	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
	tcp_req.req.iov[1].iov_len = 99;
	tcp_req.req.iovcnt = 2;
	tcp_req.req.length = 200;
	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;

	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.cmd->nvme_cmd.cid = 1;
	tcp_req.ttag = 2;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue,
			  &tcp_req, state_link);

	h2c_data = &pdu.hdr.h2c_data;
	h2c_data->cccid = 1;
	h2c_data->ttag = 2;
	h2c_data->datao = 0;
	h2c_data->datal = 200;

	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 99);

	CU_ASSERT(TAILQ_FIRST(&tqpair.tcp_req_working_queue) ==
		  &tcp_req);
	TAILQ_REMOVE(&tqpair.tcp_req_working_queue,
		     &tcp_req, state_link);
}

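/*
 * Verify in-capsule data handling: a request already waiting in
 * pending_buf_queue keeps its place while a new capsule command is accepted
 * and bound to a free tcp_req for its payload.
 */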
static void
test_nvmf_tcp_in_capsule_data_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};
	union nvmf_c2h_msg rsp = {};

	struct spdk_nvmf_request *req_temp = NULL;
	struct spdk_nvmf_tcp_req tcp_req2 = {};
	struct spdk_nvmf_tcp_req tcp_req1 = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* Initialize an idle tcp_req on the tqpair's TCP_REQUEST_STATE_FREE queue */
	tcp_req2.req.qpair = &tqpair.qpair;
	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
	tcp_req2.req.rsp = &rsp;

	/* Initialize tcp_req1 */
	tcp_req1.req.qpair = &tqpair.qpair;
	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
	tcp_req1.req.rsp = &rsp0;
	tcp_req1.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it requires an SGL buffer */
	pdu = tqpair.pdu_in_progress;
	capsule_data = &pdu->hdr.capsule_cmd;
	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;

	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;

	/* Insert tcp_req1 into pending_buf_queue; it takes precedence over the request that follows */
	nvmf_tcp_req_process(&ttransport, &tcp_req1);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);

	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;

	/* Process the qpair's capsule command; tcp_req1 still remains in pending_buf_queue */
	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
	CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
	STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
		if (req_temp == &tcp_req2.req) {
			break;
		}
	}
	CU_ASSERT(req_temp == NULL);
	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
}

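/*
 * Verify nvmf_tcp_opts_init() defaults and nvmf_tcp_qpair_init_mem_resource():
 * reqs, bufs and pdus are allocated per queue depth and wired together, with
 * the management and in-progress PDUs placed after the request PDUs.
 */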
static void
test_nvmf_tcp_qpair_init_mem_resource(void)
{
	int rc;
	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
	struct spdk_nvmf_transport transport = {};

	tqpair = calloc(1, sizeof(*tqpair));
	SPDK_CU_ASSERT_FATAL(tqpair != NULL);
	tqpair->qpair.transport = &transport;

	nvmf_tcp_opts_init(&transport.opts);
	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
	CU_ASSERT(transport.opts.max_io_size == SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_AQ_DEPTH);
	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
	CU_ASSERT(transport.opts.dif_insert_or_strip == SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
	CU_ASSERT(transport.opts.transport_specific == NULL);

	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->host_hdgst_enable == true);
	CU_ASSERT(tqpair->host_ddgst_enable == true);

	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->reqs != NULL);
	CU_ASSERT(tqpair->bufs != NULL);
	CU_ASSERT(tqpair->pdus != NULL);
	/* Just check the first and last entries */
	CU_ASSERT(tqpair->reqs[0].ttag == 1);
	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->reqs[127].ttag == 128);
	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs) + 127 * 4096);
	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH);
	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH]);
	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
	CU_ASSERT(tqpair->pdu_in_progress == &tqpair->pdus[SPDK_NVMF_TCP_DEFAULT_MAX_QUEUE_DEPTH + 1]);
	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);

	/* Free all of the tqpair's resources */
	nvmf_tcp_qpair_destroy(tqpair);
}

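/*
 * Verify nvmf_tcp_send_c2h_term_req(): the offending PDU header is echoed
 * back in the term-req payload, truncated to the error-data maximum.
 */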
static void
test_nvmf_tcp_send_c2h_term_req(void)
{
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	mgmt_pdu.sgl.total_size = 0;
	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE; expect copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);

	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE; expect copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
}

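/*
 * Verify nvmf_tcp_send_capsule_resp_pdu(): the capsule response carries the
 * NVMe completion, and plen includes the header digest only when hdgst is on.
 */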
static void
test_nvmf_tcp_send_capsule_resp_pdu(void)
{
	struct spdk_nvmf_tcp_req tcp_req = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};

	tcp_req.pdu_in_use = false;
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &pdu;
	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tqpair.host_hdgst_enable = true;

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
		  SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);

	/* hdgst disabled */
	tqpair.host_hdgst_enable = false;
	tcp_req.pdu_in_use = false;
	memset(&pdu, 0, sizeof(pdu));

	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
			  sizeof(struct spdk_nvme_cpl)));
	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
	CU_ASSERT(pdu.cb_arg == &tcp_req);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
}

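/*
 * Verify nvmf_tcp_icreq_handle(): an unexpected PFV puts the qpair into the
 * error state, while a valid ICReq produces an ICResp that mirrors the
 * negotiated parameters (cpda, digests, maxh2cdata).
 */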
static void
test_nvmf_tcp_icreq_handle(void)
{
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu mgmt_pdu = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	struct spdk_nvme_tcp_ic_resp *ic_resp;

	mgmt_pdu.qpair = &tqpair;
	tqpair.mgmt_pdu = &mgmt_pdu;
	tqpair.pdu_in_progress = &pdu_in_progress;

	/* case 1: the ICReq PFV differs from the expected value of 0 */
	pdu.hdr.ic_req.pfv = 1;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 2: a valid ICReq; expect success */
	ttransport.transport.opts.max_io_size = 32;
	pdu.hdr.ic_req.pfv = 0;
	tqpair.host_hdgst_enable = false;
	tqpair.host_ddgst_enable = false;
	tqpair.recv_buf_size = 64;
	pdu.hdr.ic_req.hpda = 16;

	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);

	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->common.plen == sizeof(struct spdk_nvme_tcp_ic_resp));
	CU_ASSERT(ic_resp->pfv == 0);
	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
}

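/*
 * Verify transfer-type checking: a command declaring a bidirectional transfer
 * is completed with SPDK_NVME_SC_INVALID_OPCODE and never queued for buffers.
 */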
static void
test_nvmf_tcp_check_xfer_type(void)
{
	const uint16_t cid = 0xAA;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it requires an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
	/* Use a non-zero cid to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up a valid SGL; the failure must come from the bidirectional transfer type */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_IO_UNIT_SIZE;

	/* Process the command; it must fail and the request must be set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
}

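/*
 * Verify SGL validation: a transport data block larger than max_io_size is
 * completed with SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID and never queued.
 */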
static void
test_nvmf_tcp_invalid_sgl(void)
{
	const uint16_t cid = 0xAABB;
	struct spdk_nvmf_tcp_transport ttransport = {};
	struct spdk_nvmf_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu pdu_in_progress = {};
	union nvmf_c2h_msg rsp0 = {};

	struct spdk_nvmf_tcp_req tcp_req = {};
	struct nvme_tcp_pdu rsp_pdu = {};

	struct spdk_nvme_tcp_cmd *capsule_data;
	struct spdk_nvme_sgl_descriptor *sgl;

	struct spdk_nvmf_transport_poll_group *group;
	struct spdk_nvmf_tcp_poll_group tcp_group = {};
	struct spdk_sock_group grp = {};

	tqpair.pdu_in_progress = &pdu_in_progress;
	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;

	tcp_group.sock_group = &grp;
	TAILQ_INIT(&tcp_group.qpairs);
	group = &tcp_group.group;
	group->transport = &ttransport.transport;
	STAILQ_INIT(&group->pending_buf_queue);
	tqpair.group = &tcp_group;

	TAILQ_INIT(&tqpair.tcp_req_free_queue);
	TAILQ_INIT(&tqpair.tcp_req_working_queue);

	tqpair.qpair.transport = &ttransport.transport;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;

	/* init tcp_req */
	tcp_req.req.qpair = &tqpair.qpair;
	tcp_req.pdu = &rsp_pdu;
	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
	tcp_req.req.rsp = &rsp0;
	tcp_req.state = TCP_REQUEST_STATE_NEW;

	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;

	/* Initialize the PDU so that it requires an SGL buffer */
	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
	sgl = &capsule_data->ccsqe.dptr.sgl1;

	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	capsule_data->common.hlen = sizeof(*capsule_data);
	capsule_data->common.plen = 1096;
	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
	/* Use a non-zero cid to check that it gets copied to the response */
	capsule_data->ccsqe.cid = cid;

	/* Set up the SGL so that nvmf_tcp_req_parse_sgl returns an error */
	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;

	/* Process the command; it must fail and the request must be set up to return an error */
	nvmf_tcp_req_process(&ttransport, &tcp_req);
	CU_ASSERT(STAILQ_EMPTY(&group->pending_buf_queue));
	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
}

int main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("nvmf", NULL, NULL);

	CU_ADD_TEST(suite, test_nvmf_tcp_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}