/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "common/lib/test_sock.c"

#include "nvme/nvme_tcp.c"
#include "common/lib/nvme/common_stubs.h"

SPDK_LOG_REGISTER_COMPONENT(nvme)

DEFINE_STUB(nvme_qpair_submit_request,
	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);

DEFINE_STUB(spdk_sock_set_priority,
	    int, (struct spdk_sock *sock, int priority), 0);

DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
		struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB(spdk_sock_get_optimal_sock_group,
	    int,
	    (struct spdk_sock *sock, struct spdk_sock_group **group),
	    0);

DEFINE_STUB(spdk_sock_group_get_ctx,
	    void *,
	    (struct spdk_sock_group *group),
	    NULL);

DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);

DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));

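/*
 * Verify that nvme_tcp_pdu_set_data_buf() maps an (offset, length) window of a
 * request iovec array onto pdu->data_iov, both within a single SGL entry and
 * across multiple entries.
 */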
static void
test_nvme_tcp_pdu_set_data_buf(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
	uint32_t data_len;
	uint64_t i;

	/* 1st case: input is a single SGL entry. */
	iov[0].iov_base = (void *)0xDEADBEEF;
	iov[0].iov_len = 4096;

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);

	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
	CU_ASSERT(pdu.data_iov[0].iov_len == 512);

	/* 2nd case: simulate split on multiple SGL entries. */
	iov[0].iov_base = (void *)0xDEADBEEF;
	iov[0].iov_len = 4096;
	iov[1].iov_base = (void *)0xFEEDBEEF;
	iov[1].iov_len = 512 * 7;
	iov[2].iov_base = (void *)0xF00DF00D;
	iov[2].iov_len = 4096 * 2;

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);

	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);

	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);

	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);

	/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
	 * entries.
	 */
	data_len = 0;
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		iov[i].iov_base = (void *)(0xDEADBEEF + i);
		iov[i].iov_len = 512 * (i + 1);
		data_len += 512 * (i + 1);
	}

	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);

	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
	}
}

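/*
 * Verify that nvme_tcp_build_iovs() assembles the socket iovec array for a
 * capsule command PDU: the header plus header digest, the data iovecs, and the
 * trailing data digest, and that it reports the total mapped length.
 */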
static void
test_nvme_tcp_build_iovs(void)
{
	const uintptr_t pdu_iov_len = 4096;
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[5] = {};
	uint32_t mapped_length = 0;
	int rc;

	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
			      SPDK_NVME_TCP_DIGEST_LEN;
	pdu.data_len = pdu_iov_len * 2;
	pdu.padding_len = 0;

	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
	pdu.data_iov[0].iov_len = pdu_iov_len;
	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
	pdu.data_iov[1].iov_len = pdu_iov_len;
	pdu.data_iovcnt = 2;

	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 4);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);

	/* Add a new data_iov entry, update pdu iov count and data length */
	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
	pdu.data_iov[2].iov_len = 123;
	pdu.data_iovcnt = 3;
	pdu.data_len += 123;
	pdu.hdr.common.plen += 123;

	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 5);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
	CU_ASSERT(iovs[3].iov_len == 123);
	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
}

struct nvme_tcp_ut_bdev_io {
	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
	int iovpos;
};

/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
static void
nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
		iov = &bio->iovs[bio->iovpos];
		/* Offset must be aligned with the start of any SGL entry */
		if (offset == 0) {
			break;
		}

		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
		offset -= iov->iov_len;
	}

	SPDK_CU_ASSERT_FATAL(offset == 0);
	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
}

static int
nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
	struct iovec *iov;

	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);

	iov = &bio->iovs[bio->iovpos];

	*address = iov->iov_base;
	*length = iov->iov_len;
	bio->iovpos++;

	return 0;
}

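/*
 * Verify nvme_tcp_build_sgl_request() for a single SGL entry, multiple SGL
 * entries, and a payload that exceeds the SGL capacity (expected to fail).
 */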
static void
test_nvme_tcp_build_sgl_request(void)
{
	struct nvme_tcp_qpair tqpair;
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct nvme_tcp_req tcp_req = {0};
	struct nvme_request req = {{0}};
	struct nvme_tcp_ut_bdev_io bio;
	uint64_t i;
	int rc;

	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
	tqpair.qpair.ctrlr = &ctrlr;
	tcp_req.req = &req;

	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.qpair = &tqpair.qpair;

	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
		bio.iovs[i].iov_len = 0;
	}

	/* Test case 1: Single SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x1000;
	bio.iovs[0].iov_len = 0x1000;
	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 1);
	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
	CU_ASSERT(tcp_req.iovcnt == 1);

	/* Test case 2: Multiple SGL. Expected: PASS */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x4000;
	for (i = 0; i < 4; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	CU_ASSERT(bio.iovpos == 4);
	CU_ASSERT(tcp_req.iovcnt == 4);
	for (i = 0; i < 4; i++) {
		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
	}

	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
	bio.iovpos = 0;
	req.payload_offset = 0;
	req.payload_size = 0x17000;
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		bio.iovs[i].iov_len = 0x1000;
	}
	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
	SPDK_CU_ASSERT_FATAL(rc != 0);
	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
	}
}

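/*
 * Same as test_nvme_tcp_pdu_set_data_buf(), but with a DIF context describing
 * 520-byte extended blocks (512 bytes of data + 8 bytes of metadata), so data
 * offsets and lengths have to be translated to extended-LBA buffer positions.
 */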
static void
test_nvme_tcp_pdu_set_data_buf_with_md(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[7] = {};
	struct spdk_dif_ctx dif_ctx = {};
	int rc;

	pdu.dif_ctx = &dif_ctx;

	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
			       0, 0, 0, 0, 0);
	CU_ASSERT(rc == 0);

	/* Single iovec case */
	iovs[0].iov_base = (void *)0xDEADBEEF;
	iovs[0].iov_len = 2080;

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);

	CU_ASSERT(dif_ctx.data_offset == 0);
	CU_ASSERT(pdu.data_len == 500);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 500);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);

	CU_ASSERT(dif_ctx.data_offset == 500);
	CU_ASSERT(pdu.data_len == 1000);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);

	CU_ASSERT(dif_ctx.data_offset == 1500);
	CU_ASSERT(pdu.data_len == 548);
	CU_ASSERT(pdu.data_iovcnt == 1);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
	CU_ASSERT(pdu.data_iov[0].iov_len == 564);

	/* Multiple iovecs case */
	iovs[0].iov_base = (void *)0xDEADBEEF;
	iovs[0].iov_len = 256;
	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
	iovs[1].iov_len = 256 + 1;
	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
	iovs[2].iov_len = 4;
	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
	iovs[3].iov_len = 3 + 123;
	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
	iovs[4].iov_len = 389 + 6;
	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
	iovs[5].iov_len = 2 + 512 + 8 + 432;
	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
	iovs[6].iov_len = 80 + 8;

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);

	CU_ASSERT(dif_ctx.data_offset == 0);
	CU_ASSERT(pdu.data_len == 500);
	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 244);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);

	CU_ASSERT(dif_ctx.data_offset == 500);
	CU_ASSERT(pdu.data_len == 1000);
	CU_ASSERT(pdu.data_iovcnt == 5);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
	CU_ASSERT(pdu.data_iov[4].iov_len == 478);

	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);

	CU_ASSERT(dif_ctx.data_offset == 1500);
	CU_ASSERT(pdu.data_len == 548);
	CU_ASSERT(pdu.data_iovcnt == 2);
	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
}

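/*
 * Verify that nvme_tcp_build_iovs() splits an extended-LBA data buffer into
 * per-block 512-byte iovecs (skipping the 8 metadata bytes of each block) when
 * a DIF context is attached to the PDU.
 */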
static void
test_nvme_tcp_build_iovs_with_md(void)
{
	struct nvme_tcp_pdu pdu = {};
	struct iovec iovs[11] = {};
	struct spdk_dif_ctx dif_ctx = {};
	uint32_t mapped_length = 0;
	int rc;

	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
			       0, 0, 0, 0, 0);
	CU_ASSERT(rc == 0);

	pdu.dif_ctx = &dif_ctx;

	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
			      SPDK_NVME_TCP_DIGEST_LEN;
	pdu.data_len = 512 * 8;
	pdu.padding_len = 0;

	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
	pdu.data_iov[0].iov_len = (512 + 8) * 8;
	pdu.data_iovcnt = 1;

	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
	CU_ASSERT(rc == 10);
	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
	CU_ASSERT(iovs[1].iov_len == 512);
	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
	CU_ASSERT(iovs[2].iov_len == 512);
	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
	CU_ASSERT(iovs[3].iov_len == 512);
	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
	CU_ASSERT(iovs[4].iov_len == 512);
	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
	CU_ASSERT(iovs[5].iov_len == 512);
	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
	CU_ASSERT(iovs[6].iov_len == 512);
	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
	CU_ASSERT(iovs[7].iov_len == 512);
	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
	CU_ASSERT(iovs[8].iov_len == 512);
	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
}

/* Just define, nothing to do */
static void
ut_nvme_complete_request(void *arg, const struct spdk_nvme_cpl *cpl)
{
	return;
}

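/*
 * Verify that nvme_tcp_req_complete_safe() completes a request only when both
 * the send ack and the data have been received, and that it defers the
 * completion via async_complete when not called from the completion context.
 */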
static void
test_nvme_tcp_req_complete_safe(void)
{
	bool rc;
	struct nvme_tcp_req	tcp_req = {0};
	struct nvme_request	req = {{0}};
	struct nvme_tcp_qpair	tqpair = {{0}};

	tcp_req.req = &req;
	tcp_req.req->qpair = &tqpair.qpair;
	tcp_req.req->cb_fn = ut_nvme_complete_request;
	tcp_req.tqpair = &tqpair;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);

	/* Test case 1: send operation and transfer completed. Expect: PASS */
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	tcp_req.ordering.bits.send_ack = 1;
	tcp_req.ordering.bits.data_recv = 1;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	rc = nvme_tcp_req_complete_safe(&tcp_req);
	CU_ASSERT(rc == true);

	/* Test case 2: send operation not completed. Expect: FAIL */
	tcp_req.ordering.raw = 0;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	rc = nvme_tcp_req_complete_safe(&tcp_req);
	SPDK_CU_ASSERT_FATAL(rc != true);
	TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	/* Test case 3: in completion context. Expect: PASS */
	tqpair.qpair.in_completion_context = 1;
	tqpair.async_complete = 0;
	tcp_req.ordering.bits.send_ack = 1;
	tcp_req.ordering.bits.data_recv = 1;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	rc = nvme_tcp_req_complete_safe(&tcp_req);
	CU_ASSERT(rc == true);
	CU_ASSERT(tcp_req.tqpair->async_complete == 0);

	/* Test case 4: in async complete. Expect: PASS */
	tqpair.qpair.in_completion_context = 0;
	tcp_req.ordering.bits.send_ack = 1;
	tcp_req.ordering.bits.data_recv = 1;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	rc = nvme_tcp_req_complete_safe(&tcp_req);
	CU_ASSERT(rc == true);
	CU_ASSERT(tcp_req.tqpair->async_complete);
}

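/*
 * Verify that nvme_tcp_req_init() populates the command CID and data-block SGL
 * and selects in-capsule data transfer, for both SGL and contiguous payloads.
 */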
static void
test_nvme_tcp_req_init(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct nvme_request req = {};
	struct nvme_tcp_req tcp_req = {0};
	struct spdk_nvme_ctrlr ctrlr = {0};
	struct nvme_tcp_ut_bdev_io bio = {};
	int rc;

	tqpair.qpair.ctrlr = &ctrlr;
	req.qpair = &tqpair.qpair;

	tcp_req.cid = 1;
	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
	req.payload.contig_or_cb_arg = &bio;
	req.payload_offset = 0;
	req.payload_size = 4096;
	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
	ctrlr.ioccsz_bytes = 1024;
	bio.iovpos = 0;
	bio.iovs[0].iov_len = 8192;
	bio.iovs[0].iov_base = (void *)0xDEADBEEF;

	/* Test case1: payload type SGL. Expect: PASS */
	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;

	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tcp_req.req == &req);
	CU_ASSERT(tcp_req.in_capsule_data == true);
	CU_ASSERT(tcp_req.iovcnt == 1);
	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
	CU_ASSERT(tcp_req.iov[0].iov_base == bio.iovs[0].iov_base);
	CU_ASSERT(req.cmd.cid == tcp_req.cid);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);

	/* Test case2: payload type CONTIG. Expect: PASS */
	memset(&req.cmd, 0, sizeof(req.cmd));
	memset(&tcp_req, 0, sizeof(tcp_req));
	tcp_req.cid = 1;
	req.payload.reset_sgl_fn = NULL;
	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;

	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tcp_req.req == &req);
	CU_ASSERT(tcp_req.in_capsule_data == true);
	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
	CU_ASSERT(tcp_req.iov[0].iov_base == &bio);
	CU_ASSERT(tcp_req.iovcnt == 1);
	CU_ASSERT(req.cmd.cid == tcp_req.cid);
	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
}

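/*
 * Verify that nvme_tcp_req_get() pops a request from the free list, resets its
 * fields, moves it to outstanding_reqs, and returns NULL once the free list is
 * empty.
 */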
static void
test_nvme_tcp_req_get(void)
{
	struct nvme_tcp_req tcp_req = {0};
	struct nvme_tcp_qpair tqpair = {};
	struct nvme_tcp_pdu send_pdu = {};

	tcp_req.pdu = &send_pdu;
	tcp_req.state = NVME_TCP_REQ_FREE;

	TAILQ_INIT(&tqpair.free_reqs);
	TAILQ_INIT(&tqpair.outstanding_reqs);
	TAILQ_INSERT_HEAD(&tqpair.free_reqs, &tcp_req, link);

	CU_ASSERT(nvme_tcp_req_get(&tqpair) == &tcp_req);
	CU_ASSERT(tcp_req.state == NVME_TCP_REQ_ACTIVE);
	CU_ASSERT(tcp_req.datao == 0);
	CU_ASSERT(tcp_req.req == NULL);
	CU_ASSERT(tcp_req.in_capsule_data == false);
	CU_ASSERT(tcp_req.r2tl_remain == 0);
	CU_ASSERT(tcp_req.iovcnt == 0);
	CU_ASSERT(tcp_req.ordering.raw == 0);
	CU_ASSERT(!TAILQ_EMPTY(&tqpair.outstanding_reqs));
	CU_ASSERT(TAILQ_EMPTY(&tqpair.free_reqs));

	/* No tcp request available, expect fail */
	SPDK_CU_ASSERT_FATAL(nvme_tcp_req_get(&tqpair) == NULL);
}

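/*
 * Verify nvme_tcp_qpair_capsule_cmd_send() header generation (flags, pdo, plen)
 * with digests enabled, digests disabled, and with controller-requested
 * padding (cpda).
 */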
static void
test_nvme_tcp_qpair_capsule_cmd_send(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct nvme_tcp_req tcp_req = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_request req = {};
	char iov_base0[4096];
	char iov_base1[4096];
	uint32_t plen;
	uint8_t pdo;

	memset(iov_base0, 0xFF, 4096);
	memset(iov_base1, 0xFF, 4096);
	tcp_req.req = &req;
	tcp_req.pdu = &pdu;
	TAILQ_INIT(&tqpair.send_queue);
	tqpair.stats = &stats;

	tcp_req.iov[0].iov_base = (void *)iov_base0;
	tcp_req.iov[0].iov_len = 4096;
	tcp_req.iov[1].iov_base = (void *)iov_base1;
	tcp_req.iov[1].iov_len = 4096;
	tcp_req.iovcnt = 2;
	tcp_req.req->payload_size = 8192;
	tcp_req.in_capsule_data = true;
	tqpair.cpda = NVME_TCP_HPDA_DEFAULT;

	/* Test case 1: host hdgst and ddgst enable. Expect: PASS */
	tqpair.flags.host_hdgst_enable = 1;
	tqpair.flags.host_ddgst_enable = 1;
	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd) +
		     SPDK_NVME_TCP_DIGEST_LEN;
	plen += tcp_req.req->payload_size;
	plen += SPDK_NVME_TCP_DIGEST_LEN;

	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);

	/* Test case 2: host hdgst and ddgst disable. Expect: PASS */
	memset(&pdu, 0, sizeof(pdu));
	tqpair.flags.host_hdgst_enable = 0;
	tqpair.flags.host_ddgst_enable = 0;

	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd);
	plen += tcp_req.req->payload_size;

	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags == 0);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);

	/* Test case 3: padding available. Expect: PASS */
	memset(&pdu, 0, sizeof(pdu));
	tqpair.flags.host_hdgst_enable = 1;
	tqpair.flags.host_ddgst_enable = 1;
	tqpair.cpda = SPDK_NVME_TCP_CPDA_MAX;

	pdo = plen = (SPDK_NVME_TCP_CPDA_MAX + 1) << 2;
	plen += tcp_req.req->payload_size;
	plen += SPDK_NVME_TCP_DIGEST_LEN;

	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
}

/* Just define, nothing to do */
static void
ut_nvme_tcp_qpair_xfer_complete_cb(void *cb_arg)
{
	return;
}

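/*
 * Verify that nvme_tcp_qpair_write_pdu() computes the header and data digests
 * when enabled, builds the PDU iovec array and sock_req, and queues the PDU on
 * send_queue.
 */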
static void
test_nvme_tcp_qpair_write_pdu(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct nvme_tcp_pdu pdu = {};
	void *cb_arg = (void *)0xDEADBEEF;
	char iov_base0[4096];
	char iov_base1[4096];

	memset(iov_base0, 0xFF, 4096);
	memset(iov_base1, 0xFF, 4096);
	pdu.data_len = 4096 * 2;
	pdu.padding_len = 0;
	pdu.data_iov[0].iov_base = (void *)iov_base0;
	pdu.data_iov[0].iov_len = 4096;
	pdu.data_iov[1].iov_base = (void *)iov_base1;
	pdu.data_iov[1].iov_len = 4096;
	pdu.data_iovcnt = 2;
	TAILQ_INIT(&tqpair.send_queue);

	/* Test case1: host hdgst and ddgst enable. Expect: PASS */
	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);

	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen +
			      SPDK_NVME_TCP_DIGEST_LEN * 2;
	pdu.hdr.common.plen += pdu.data_len;
	tqpair.flags.host_hdgst_enable = 1;
	tqpair.flags.host_ddgst_enable = 1;
	tqpair.stats = &stats;

	nvme_tcp_qpair_write_pdu(&tqpair,
				 &pdu,
				 ut_nvme_tcp_qpair_xfer_complete_cb,
				 cb_arg);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	/* Check that the header digest crc was written into the raw header */
	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen]);
	CU_ASSERT(pdu.data_digest[0]);
	CU_ASSERT(pdu.sock_req.iovcnt == 4);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == (sizeof(struct spdk_nvme_tcp_cmd) +
					 SPDK_NVME_TCP_DIGEST_LEN));
	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
	CU_ASSERT(pdu.iov[3].iov_base == &pdu.data_digest);
	CU_ASSERT(pdu.iov[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
	CU_ASSERT(pdu.cb_arg == cb_arg);
	CU_ASSERT(pdu.qpair == &tqpair);
	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);

	/* Test case2: host hdgst and ddgst disable. Expect: PASS */
	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);

	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
	pdu.hdr.common.plen = pdu.hdr.common.hlen + pdu.data_len;
	tqpair.flags.host_hdgst_enable = 0;
	tqpair.flags.host_ddgst_enable = 0;

	nvme_tcp_qpair_write_pdu(&tqpair,
				 &pdu,
				 ut_nvme_tcp_qpair_xfer_complete_cb,
				 cb_arg);
	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen] == 0);
	CU_ASSERT(pdu.data_digest[0] == 0);
	CU_ASSERT(pdu.sock_req.iovcnt == 3);
	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd));
	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
	CU_ASSERT(pdu.cb_arg == cb_arg);
	CU_ASSERT(pdu.qpair == &tqpair);
	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
}

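/*
 * Verify nvme_tcp_qpair_set_recv_state() state transitions, including clearing
 * recv_pdu when entering the AWAIT_PDU_READY or ERROR states.
 */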
static void
test_nvme_tcp_qpair_set_recv_state(void)
{
	struct nvme_tcp_qpair tqpair = {};
	enum nvme_tcp_pdu_recv_state state;
	struct nvme_tcp_pdu recv_pdu = {};

	tqpair.recv_pdu = &recv_pdu;

	/* case1: The recv state of tqpair is the same as the state to be set */
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	state = NVME_TCP_PDU_RECV_STATE_ERROR;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == state);

	/* case2: The recv state of tqpair differs from the state to be set */
	/* state is NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY or NVME_TCP_PDU_RECV_STATE_ERROR, tqpair->recv_pdu will be cleared */
	tqpair.recv_pdu->cb_arg = (void *)0xDEADBEEF;
	state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tqpair.recv_pdu->cb_arg == (void *)0x0);

	tqpair.recv_pdu->cb_arg = (void *)0xDEADBEEF;
	state = NVME_TCP_PDU_RECV_STATE_ERROR;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.recv_pdu->cb_arg == (void *)0x0);

	/* state is NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH or NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH or NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD or default */
	state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);

	state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);

	state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);

	state = 0xff;
	nvme_tcp_qpair_set_recv_state(&tqpair, state);
	CU_ASSERT(tqpair.recv_state == 0xff);
}

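/*
 * Verify that nvme_tcp_alloc_reqs() allocates and links tcp_reqs and send_pdus
 * for one or more entries, and that nvme_tcp_free_reqs() releases them.
 */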
static void
test_nvme_tcp_alloc_reqs(void)
{
	struct nvme_tcp_qpair tqpair = {};
	int rc = 0;

	/* case1: single entry. Expect: PASS */
	tqpair.num_entries = 1;
	rc = nvme_tcp_alloc_reqs(&tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(tqpair.tcp_reqs[0].cid == 0);
	CU_ASSERT(tqpair.tcp_reqs[0].tqpair == &tqpair);
	CU_ASSERT(tqpair.tcp_reqs[0].pdu == &tqpair.send_pdus[0]);
	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
	free(tqpair.tcp_reqs);
	spdk_free(tqpair.send_pdus);

	/* case2: multiple entries. Expect: PASS */
	tqpair.num_entries = 5;
	rc = nvme_tcp_alloc_reqs(&tqpair);
	CU_ASSERT(rc == 0);
	for (int i = 0; i < tqpair.num_entries; i++) {
		CU_ASSERT(tqpair.tcp_reqs[i].cid == i);
		CU_ASSERT(tqpair.tcp_reqs[i].tqpair == &tqpair);
		CU_ASSERT(tqpair.tcp_reqs[i].pdu == &tqpair.send_pdus[i]);
	}
	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);

	/* case3: Test nvme_tcp_free_reqs. Expect: PASS */
	nvme_tcp_free_reqs(&tqpair);
	CU_ASSERT(tqpair.tcp_reqs == NULL);
	CU_ASSERT(tqpair.send_pdus == NULL);
}

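/* Verify nvme_tcp_parse_addr() with a missing address and with a valid IPv4 address. */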
static void
test_nvme_tcp_parse_addr(void)
{
	struct sockaddr_storage dst_addr;
	int rc = 0;

	memset(&dst_addr, 0, sizeof(dst_addr));
	/* case1: getaddrinfo failed */
	rc = nvme_tcp_parse_addr(&dst_addr, AF_INET, NULL, NULL);
	CU_ASSERT(rc != 0);

	/* case2: res->ai_addrlen < sizeof(*sa). Expect: Pass. */
	rc = nvme_tcp_parse_addr(&dst_addr, AF_INET, "12.34.56.78", "23");
	CU_ASSERT(rc == 0);
	CU_ASSERT(dst_addr.ss_family == AF_INET);
}

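/*
 * Verify that nvme_tcp_qpair_send_h2c_term_req() builds an H2C termination
 * request whose copied error data is capped at
 * SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE.
 */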
static void
test_nvme_tcp_qpair_send_h2c_term_req(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	uint32_t error_offset = 1;

	tqpair.send_pdu = &send_pdu;
	tqpair.recv_pdu = &recv_pdu;
	tqpair.stats = &stats;
	TAILQ_INIT(&tqpair.send_queue);
	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
	pdu.hdr.common.hlen = 64;
	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  pdu.hdr.common.hlen);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);

	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
	pdu.hdr.common.hlen = 255;
	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == (unsigned)
		  tqpair.send_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
}

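/*
 * Verify nvme_tcp_pdu_ch_handle(): malformed or unexpected common headers must
 * trigger an H2C termination request, while a valid IC_RESP header advances the
 * receive state machine to AWAIT_PDU_PSH.
 */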
static void
test_nvme_tcp_pdu_ch_handle(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};

	tqpair.send_pdu = &send_pdu;
	tqpair.recv_pdu = &recv_pdu;
	tqpair.stats = &stats;
	TAILQ_INIT(&tqpair.send_queue);
	/* case 1: Already received IC_RESP PDU. Expect: fail */
	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);

	/* case 2: Expected PDU header length and received are different. Expect: fail */
	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
	tqpair.recv_pdu->hdr.common.hlen = 0;
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);

	/* case 3: The TCP/IP tqpair connection is not negotiated. Expect: fail */
	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
	tqpair.recv_pdu->hdr.common.hlen = 0;
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);

	/* case 4: Unexpected PDU type. Expect: fail */
	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_pdu->hdr.common.plen = 0;
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);

	/* case 5: plen error. Expect: fail */
	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.recv_pdu->hdr.common.plen = 0;
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);

	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.recv_pdu->hdr.common.plen = 0;
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);

	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_pdu->hdr.common.plen = 0;
	tqpair.recv_pdu->hdr.common.pdo = 64;
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);

	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_pdu->hdr.common.plen = 0;
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);

	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
	tqpair.recv_pdu->hdr.common.plen = 0;
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
		  (unsigned)sizeof(struct spdk_nvme_tcp_r2t_hdr));
	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);

	/* case 6: Expect: PASS */
	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
	nvme_tcp_pdu_ch_handle(&tqpair);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
	CU_ASSERT(tqpair.recv_pdu->psh_len == tqpair.recv_pdu->hdr.common.hlen - sizeof(
			  struct spdk_nvme_tcp_common_pdu_hdr));
}

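/*
 * Mocked spdk_sock_connect_ext(): checks the connect parameters generated by
 * nvme_tcp_qpair_connect_sock() and returns a dummy socket pointer.
 */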
DEFINE_RETURN_MOCK(spdk_sock_connect_ext, struct spdk_sock *);
struct spdk_sock *
spdk_sock_connect_ext(const char *ip, int port,
		      char *_impl_name, struct spdk_sock_opts *opts)
{
	HANDLE_RETURN_MOCK(spdk_sock_connect_ext);
	CU_ASSERT(port == 23);
	CU_ASSERT(opts->opts_size == sizeof(*opts));
	CU_ASSERT(opts->priority == 1);
	CU_ASSERT(opts->zcopy == true);
	CU_ASSERT(!strcmp(ip, "192.168.1.78"));
	return (struct spdk_sock *)0xDDADBEEF;
}

static void
test_nvme_tcp_qpair_connect_sock(void)
{
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_tcp_qpair tqpair = {};
	int rc;

	tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
	tqpair.qpair.id = 1;
	tqpair.qpair.poll_group = (void *)0xDEADBEEF;
	ctrlr.trid.priority = 1;
	ctrlr.trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	memcpy(ctrlr.trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
	memcpy(ctrlr.trid.trsvcid, "23", sizeof("23"));
	memcpy(ctrlr.opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
	memcpy(ctrlr.opts.src_svcid, "23", sizeof("23"));

	rc = nvme_tcp_qpair_connect_sock(&ctrlr, &tqpair.qpair);
	CU_ASSERT(rc == 0);

	/* Unsupported family of the transport address */
	ctrlr.trid.adrfam = SPDK_NVMF_ADRFAM_IB;

	rc = nvme_tcp_qpair_connect_sock(&ctrlr, &tqpair.qpair);
	SPDK_CU_ASSERT_FATAL(rc == -1);

	/* Invalid dst_port, INT_MAX is 2147483647 */
	ctrlr.trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	memcpy(ctrlr.trid.trsvcid, "2147483647", sizeof("2147483647"));

	rc = nvme_tcp_qpair_connect_sock(&ctrlr, &tqpair.qpair);
	SPDK_CU_ASSERT_FATAL(rc == -1);

	/* Parse invalid address */
	memcpy(ctrlr.trid.trsvcid, "23", sizeof("23"));
	memcpy(ctrlr.trid.traddr, "192.168.1.256", sizeof("192.168.1.256"));

	rc = nvme_tcp_qpair_connect_sock(&ctrlr, &tqpair.qpair);
	SPDK_CU_ASSERT_FATAL(rc != 0);
}

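/*
 * Verify that nvme_tcp_qpair_icreq_send() fills the IC_REQ PDU (PFV, MAXR2T,
 * HPDA, digest flags) according to the controller options.
 */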
static void
test_nvme_tcp_qpair_icreq_send(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct spdk_nvme_ctrlr ctrlr = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_poll_group poll_group = {};
	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
	int rc;

	tqpair.send_pdu = &pdu;
	tqpair.qpair.ctrlr = &ctrlr;
	tqpair.qpair.poll_group = &poll_group.group;
	tqpair.stats = &stats;
	ic_req = &pdu.hdr.ic_req;

	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
	tqpair.qpair.ctrlr->opts.header_digest = true;
	tqpair.qpair.ctrlr->opts.data_digest = true;
	TAILQ_INIT(&tqpair.send_queue);

	rc = nvme_tcp_qpair_icreq_send(&tqpair);
	CU_ASSERT(rc == 0);
	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
	CU_ASSERT(ic_req->pfv == 0);
	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
}

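/*
 * Verify nvme_tcp_c2h_data_payload_handle(): a C2H data PDU carrying the
 * SUCCESS flag completes the request with a synthesized completion, a PDU
 * without it does not, and a C2H termination request payload moves the qpair
 * into the ERROR receive state.
 */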
static void
test_nvme_tcp_c2h_payload_handle(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_req tcp_req = {};
	struct nvme_request	req = {};
	struct nvme_tcp_pdu recv_pdu = {};
	uint32_t reaped = 1;

	tcp_req.req = &req;
	tcp_req.req->qpair = &tqpair.qpair;
	tcp_req.req->cb_fn = ut_nvme_complete_request;
	tcp_req.tqpair = &tqpair;
	tcp_req.cid = 1;
	tqpair.stats = &stats;

	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);

	pdu.req = &tcp_req;
	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
					SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
	pdu.data_len = 1024;

	tqpair.qpair.id = 1;
	tqpair.recv_pdu = &recv_pdu;

	/* case 1: nvme_tcp_c2h_data_payload_handle: tcp_req->datao != tcp_req->req->payload_size */
	tcp_req.datao = 1024;
	tcp_req.req->payload_size = 2048;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	tcp_req.ordering.bits.send_ack = 1;
	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
	tcp_req.ordering.bits.data_recv = 0;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);

	CU_ASSERT(tcp_req.rsp.status.p == 0);
	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
	CU_ASSERT(reaped == 2);

	/* case 2: nvme_tcp_c2h_data_payload_handle: tcp_req->datao == tcp_req->req->payload_size */
	tcp_req.datao = 1024;
	tcp_req.req->payload_size = 1024;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	tcp_req.ordering.bits.send_ack = 1;
	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
	tcp_req.ordering.bits.data_recv = 0;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);

	CU_ASSERT(tcp_req.rsp.status.p == 1);
	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
	CU_ASSERT(reaped == 3);

	/* case 3: nvme_tcp_c2h_data_payload_handle: flag does not have SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS */
	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
	tcp_req.datao = 1024;
	tcp_req.req->payload_size = 1024;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	tcp_req.ordering.bits.send_ack = 1;
	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
	tcp_req.ordering.bits.data_recv = 0;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);

	CU_ASSERT(reaped == 3);

	/* case 4: nvme_tcp_c2h_term_req_payload_handle: recv_state is NVME_TCP_PDU_RECV_STATE_ERROR */
	pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
}

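/*
 * Verify nvme_tcp_icresp_handle(): reject an IC_RESP with an unexpected pfv,
 * maxh2cdata or cpda, and otherwise adopt the negotiated parameters, waiting
 * for the icreq ack before moving to FABRIC_CONNECT_SEND.
 */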
static void
test_nvme_tcp_icresp_handle(void)
{
	struct nvme_tcp_qpair tqpair = {};
	struct spdk_nvme_tcp_stat stats = {};
	struct nvme_tcp_pdu pdu = {};
	struct nvme_tcp_pdu send_pdu = {};
	struct nvme_tcp_pdu recv_pdu = {};

	tqpair.send_pdu = &send_pdu;
	tqpair.recv_pdu = &recv_pdu;
	tqpair.stats = &stats;
	TAILQ_INIT(&tqpair.send_queue);

	/* case 1: Received ICResp PFV differs from the expected value. */
	pdu.hdr.ic_resp.pfv = 1;

	nvme_tcp_icresp_handle(&tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 2: Received ICResp maxh2cdata differs from the expected value. */
	pdu.hdr.ic_resp.pfv = 0;
	pdu.hdr.ic_resp.maxh2cdata = 2048;

	nvme_tcp_icresp_handle(&tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 3: Received ICResp cpda differs from the expected value. */
	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
	pdu.hdr.ic_resp.cpda = 64;

	nvme_tcp_icresp_handle(&tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);

	/* case 4: waiting for the icreq ack. */
	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
	pdu.hdr.ic_resp.cpda = 30;
	pdu.hdr.ic_resp.dgst.bits.hdgst_enable = true;
	pdu.hdr.ic_resp.dgst.bits.ddgst_enable = true;
	tqpair.flags.icreq_send_ack = 0;

	nvme_tcp_icresp_handle(&tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_INITIALIZING);
	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);

	/* case 5: Expect: PASS. */
	tqpair.flags.icreq_send_ack = 1;

	nvme_tcp_icresp_handle(&tqpair, &pdu);

	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND);
	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
}

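/*
 * Verify nvme_tcp_pdu_payload_handle() dispatch: a C2H_DATA payload completes
 * the request, while a C2H_TERM_REQ payload puts the qpair into the ERROR
 * receive state.
 */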
static void
test_nvme_tcp_pdu_payload_handle(void)
{
	struct nvme_tcp_qpair	tqpair = {};
	struct spdk_nvme_tcp_stat	stats = {};
	struct nvme_tcp_pdu	recv_pdu = {};
	struct nvme_tcp_req	tcp_req = {};
	struct nvme_request	req = {};
	uint32_t		reaped = 0;

	tqpair.recv_pdu = &recv_pdu;
	tcp_req.tqpair = &tqpair;
	tcp_req.req = &req;
	tcp_req.req->qpair = &tqpair.qpair;
	tqpair.stats = &stats;

	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
	tqpair.qpair.id = 1;
	recv_pdu.ddgst_enable = false;
	recv_pdu.req = &tcp_req;
	recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
					     SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
	recv_pdu.data_len = 1024;
	tcp_req.ordering.bits.data_recv = 0;
	tcp_req.req->cb_fn = ut_nvme_complete_request;
	tcp_req.cid = 1;
	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);

	/* C2H_DATA */
	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
	tcp_req.datao = 1024;
	tcp_req.req->payload_size = 2048;
	tcp_req.state = NVME_TCP_REQ_ACTIVE;
	tcp_req.ordering.bits.send_ack = 1;

	recv_pdu.req = &tcp_req;
	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
	CU_ASSERT(tcp_req.rsp.status.p == 0);
	CU_ASSERT(tcp_req.rsp.cid == 1);
	CU_ASSERT(tcp_req.rsp.sqid == 1);
	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
	CU_ASSERT(reaped == 1);

	/* TermResp */
	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
	recv_pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;

	recv_pdu.req = &tcp_req;
	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
}

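/*
 * Verify nvme_tcp_capsule_resp_hdr_handle(): a matching CID completes the
 * request with the received cqe, while an unknown CID puts the qpair into the
 * ERROR receive state.
 */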
1360 static void
1361 test_nvme_tcp_capsule_resp_hdr_handle(void)
1362 {
1363 	struct nvme_tcp_qpair	tqpair = {};
1364 	struct spdk_nvme_tcp_stat	stats = {};
1365 	struct nvme_request	req = {};
1366 	struct spdk_nvme_cpl	rccqe_tgt = {};
1367 	struct nvme_tcp_req	*tcp_req = NULL;
1368 	uint32_t		reaped = 0;
1369 	int			rc;
1370 
1371 	/* Initialize requests and pdus */
1372 	tqpair.num_entries = 1;
1373 	tqpair.stats = &stats;
1374 	req.qpair = &tqpair.qpair;
1375 
1376 	rc = nvme_tcp_alloc_reqs(&tqpair);
1377 	SPDK_CU_ASSERT_FATAL(rc == 0);
1378 	tcp_req = nvme_tcp_req_get(&tqpair);
1379 	SPDK_CU_ASSERT_FATAL(tcp_req != NULL);
1380 	rc = nvme_tcp_req_init(&tqpair, &req, tcp_req);
1381 	SPDK_CU_ASSERT_FATAL(rc == 0);
1382 	tcp_req->ordering.bits.send_ack = 1;
1383 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1384 	/* tqpair.recv_pdu will be reset after handling */
1385 	memset(&rccqe_tgt, 0xff, sizeof(rccqe_tgt));
1386 	rccqe_tgt.cid = 0;
1387 	memcpy(&tqpair.recv_pdu->hdr.capsule_resp.rccqe, &rccqe_tgt, sizeof(rccqe_tgt));
1388 
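	/* cid 0 matches the outstanding request: the cpl is copied into tcp_req->rsp and the request is reaped. */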
1389 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1390 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1391 	CU_ASSERT(!memcmp(&tcp_req->rsp, &rccqe_tgt, sizeof(rccqe_tgt)));
1392 	CU_ASSERT(tcp_req->ordering.bits.data_recv == 1);
1393 	CU_ASSERT(reaped == 1);
1394 	CU_ASSERT(TAILQ_EMPTY(&tcp_req->tqpair->outstanding_reqs));
1395 
1396 	/* No outstanding tcp request matches the cid, expect failure */
1397 	reaped = 0;
1398 	tqpair.recv_pdu->hdr.capsule_resp.rccqe.cid = 1;
1399 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1400 
1401 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1402 	CU_ASSERT(reaped == 0);
1403 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
1404 	nvme_tcp_free_reqs(&tqpair);
1405 }
1406 
1407 static void
1408 test_nvme_tcp_ctrlr_connect_qpair(void)
1409 {
1410 	struct spdk_nvme_ctrlr ctrlr = {};
1411 	struct spdk_nvme_qpair *qpair;
1412 	struct nvme_tcp_qpair *tqpair;
1413 	struct nvme_tcp_pdu pdu = {};
1414 	struct nvme_tcp_pdu recv_pdu = {};
1415 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1416 	int rc;
1417 
1418 	tqpair = calloc(1, sizeof(*tqpair));
	SPDK_CU_ASSERT_FATAL(tqpair != NULL);
1419 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1420 	tqpair->recv_pdu = &recv_pdu;
1421 	qpair = &tqpair->qpair;
1422 	tqpair->sock = (struct spdk_sock *)0xDEADBEEF;
1423 	tqpair->send_pdu = &pdu;
1424 	tqpair->qpair.ctrlr = &ctrlr;
1425 	tqpair->qpair.state = NVME_QPAIR_CONNECTING;
1426 	ic_req = &pdu.hdr.ic_req;
1427 
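	/* Pre-populate an IC_RESP PDU so the connect sequence below can complete the icreq/icresp handshake. */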
1428 	tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1429 	tqpair->recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1430 	tqpair->recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1431 	tqpair->recv_pdu->ch_valid_bytes = 8;
1432 	tqpair->recv_pdu->psh_valid_bytes = tqpair->recv_pdu->hdr.common.hlen;
1433 	tqpair->recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
1434 	tqpair->recv_pdu->hdr.ic_resp.cpda = 1;
1435 	tqpair->flags.icreq_send_ack = 1;
1436 	tqpair->qpair.ctrlr->opts.header_digest = true;
1437 	tqpair->qpair.ctrlr->opts.data_digest = true;
1438 	TAILQ_INIT(&tqpair->send_queue);
1439 
1440 	rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
1441 	CU_ASSERT(rc == 0);
1442 
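	/* Poll the qpair until the connection state machine leaves the CONNECTING state. */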
1443 	while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
1444 		rc = nvme_tcp_qpair_process_completions(qpair, 0);
1445 		CU_ASSERT(rc >= 0);
1446 	}
1447 
1448 	CU_ASSERT(tqpair->maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
1449 	CU_ASSERT(tqpair->state == NVME_TCP_QPAIR_STATE_RUNNING);
1450 	CU_ASSERT(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1451 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1452 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1453 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1454 	CU_ASSERT(ic_req->pfv == 0);
1455 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1456 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1457 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1458 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1459 
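	/* Clean up; the qpair allocated above is released by the delete call. */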
1460 	nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1461 }
1462 
1463 static void
1464 test_nvme_tcp_ctrlr_disconnect_qpair(void)
1465 {
1466 	struct spdk_nvme_ctrlr ctrlr = {};
1467 	struct spdk_nvme_qpair *qpair;
1468 	struct nvme_tcp_qpair tqpair = {
1469 		.qpair.trtype = SPDK_NVME_TRANSPORT_TCP,
1470 	};
1471 	struct nvme_tcp_poll_group tgroup = {};
1472 	struct nvme_tcp_pdu pdu = {};
1473 
1474 	qpair = &tqpair.qpair;
1475 	qpair->poll_group = &tgroup.group;
1476 	tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
1477 	tqpair.needs_poll = true;
1478 	TAILQ_INIT(&tgroup.needs_poll);
1479 	TAILQ_INIT(&tqpair.send_queue);
1480 	TAILQ_INSERT_TAIL(&tgroup.needs_poll, &tqpair, link);
1481 	TAILQ_INSERT_TAIL(&tqpair.send_queue, &pdu, tailq);
1482 
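	/* Disconnect should remove the qpair from the poll group's needs_poll list, clear the socket and drain the send queue. */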
1483 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1484 
1485 	CU_ASSERT(tqpair.needs_poll == false);
1486 	CU_ASSERT(tqpair.sock == NULL);
1487 	CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue) == true);
1488 }
1489 
1490 static void
1491 test_nvme_tcp_ctrlr_create_io_qpair(void)
1492 {
1493 	struct spdk_nvme_qpair *qpair = NULL;
1494 	struct spdk_nvme_ctrlr ctrlr = {};
1495 	uint16_t qid = 1;
1496 	const struct spdk_nvme_io_qpair_opts opts = {
1497 		.io_queue_size = 1,
1498 		.qprio = SPDK_NVME_QPRIO_URGENT,
1499 		.io_queue_requests = 1,
1500 	};
1501 	struct nvme_tcp_qpair *tqpair;
1502 
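	/* Target transport ID and host source address/service used when the qpair is created. */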
1503 	ctrlr.trid.priority = 1;
1504 	ctrlr.trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1505 	memcpy(ctrlr.trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1506 	memcpy(ctrlr.trid.trsvcid, "23", sizeof("23"));
1507 	memcpy(ctrlr.opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1508 	memcpy(ctrlr.opts.src_svcid, "23", sizeof("23"));
1509 
1510 	qpair = nvme_tcp_ctrlr_create_io_qpair(&ctrlr, qid, &opts);
1511 	tqpair = nvme_tcp_qpair(qpair);
1512 
1513 	SPDK_CU_ASSERT_FATAL(qpair != NULL);
1514 	CU_ASSERT(qpair->id == 1);
1515 	CU_ASSERT(qpair->ctrlr == &ctrlr);
1516 	CU_ASSERT(qpair->qprio == SPDK_NVME_QPRIO_URGENT);
1517 	CU_ASSERT(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
1518 	CU_ASSERT(qpair->poll_group == (void *)0xDEADBEEF);
1519 	CU_ASSERT(tqpair->num_entries == 1);
1520 
1521 	free(tqpair->tcp_reqs);
1522 	spdk_free(tqpair->send_pdus);
1523 	free(tqpair);
1524 }
1525 
1526 static void
1527 test_nvme_tcp_ctrlr_delete_io_qpair(void)
1528 {
1529 	struct spdk_nvme_ctrlr *ctrlr = (struct spdk_nvme_ctrlr *)0xdeadbeef;
1530 	struct spdk_nvme_qpair *qpair;
1531 	struct nvme_tcp_qpair *tqpair;
1532 	struct nvme_tcp_req tcp_req = {};
1533 	struct nvme_request	req = {};
1534 	int rc;
1535 
1536 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
	SPDK_CU_ASSERT_FATAL(tqpair != NULL);
1537 	tqpair->tcp_reqs = calloc(1, sizeof(struct nvme_tcp_req));
1538 	tqpair->send_pdus = calloc(1, sizeof(struct nvme_tcp_pdu));
1539 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1540 	qpair = &tqpair->qpair;
1541 	tcp_req.req = &req;
1542 	tcp_req.req->qpair = &tqpair->qpair;
1543 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1544 	tcp_req.tqpair = tqpair;
1545 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1546 	TAILQ_INIT(&tqpair->outstanding_reqs);
1547 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1548 
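	/* Delete the qpair while one request is still outstanding; deletion is expected to succeed. */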
1549 	rc = nvme_tcp_ctrlr_delete_io_qpair(ctrlr, qpair);
1550 
1551 	CU_ASSERT(rc == 0);
1552 }
1553 
1554 int main(int argc, char **argv)
1555 {
1556 	CU_pSuite	suite = NULL;
1557 	unsigned int	num_failures;
1558 
1559 	CU_set_error_action(CUEA_ABORT);
1560 	CU_initialize_registry();
1561 
1562 	suite = CU_add_suite("nvme_tcp", NULL, NULL);
1563 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
1564 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
1565 	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
1566 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
1567 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
1568 	CU_ADD_TEST(suite, test_nvme_tcp_req_complete_safe);
1569 	CU_ADD_TEST(suite, test_nvme_tcp_req_get);
1570 	CU_ADD_TEST(suite, test_nvme_tcp_req_init);
1571 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_capsule_cmd_send);
1572 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_write_pdu);
1573 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_set_recv_state);
1574 	CU_ADD_TEST(suite, test_nvme_tcp_alloc_reqs);
1575 	CU_ADD_TEST(suite, test_nvme_tcp_parse_addr);
1576 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_send_h2c_term_req);
1577 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_ch_handle);
1578 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_connect_sock);
1579 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_icreq_send);
1580 	CU_ADD_TEST(suite, test_nvme_tcp_c2h_payload_handle);
1581 	CU_ADD_TEST(suite, test_nvme_tcp_icresp_handle);
1582 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_payload_handle);
1583 	CU_ADD_TEST(suite, test_nvme_tcp_capsule_resp_hdr_handle);
1584 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_connect_qpair);
1585 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_disconnect_qpair);
1586 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_create_io_qpair);
1587 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_delete_io_qpair);
1588 
1589 	CU_basic_set_mode(CU_BRM_VERBOSE);
1590 	CU_basic_run_tests();
1591 	num_failures = CU_get_number_of_failures();
1592 	CU_cleanup_registry();
1593 	return num_failures;
1594 }
1595