xref: /spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c (revision a4009e7ad3d0aa0cfda4ce321e22161cbd1a26dc)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk/nvme.h"
9 
10 #include "spdk_internal/cunit.h"
11 
12 #include "common/lib/test_sock.c"
13 #include "nvme/nvme_internal.h"
14 #include "common/lib/nvme/common_stubs.h"
15 
16 /* nvme_transport_ctrlr_disconnect_qpair_done() stub is defined in common_stubs.h, but we need to
17  * override it here */
18 static void nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair);
19 #define nvme_transport_ctrlr_disconnect_qpair_done nvme_transport_ctrlr_disconnect_qpair_done_mocked
20 
21 #include "nvme/nvme_tcp.c"
22 
23 SPDK_LOG_REGISTER_COMPONENT(nvme)
24 
25 DEFINE_STUB(nvme_qpair_submit_request,
26 	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
27 
28 DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
29 		struct spdk_nvme_qpair *qpair), 0);
30 DEFINE_STUB(spdk_sock_get_optimal_sock_group,
31 	    int,
32 	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
33 	    0);
34 
35 DEFINE_STUB(spdk_sock_group_get_ctx,
36 	    void *,
37 	    (struct spdk_sock_group *group),
38 	    NULL);
39 
40 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
41 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
42 
43 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
44 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
45 
46 DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
47 		struct spdk_nvme_cmd *cmd));
48 
49 DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
50 		struct spdk_nvme_cpl *cpl));
51 
52 static void
53 nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair)
54 {
55 	qpair->state = NVME_QPAIR_DISCONNECTED;
56 }
57 
58 static void
59 test_nvme_tcp_pdu_set_data_buf(void)
60 {
61 	struct nvme_tcp_pdu pdu = {};
62 	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
63 	uint32_t data_len;
64 	uint64_t i;
65 
66 	/* 1st case: input is a single SGL entry. */
67 	iov[0].iov_base = (void *)0xDEADBEEF;
68 	iov[0].iov_len = 4096;
69 
70 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
71 
72 	CU_ASSERT(pdu.data_iovcnt == 1);
73 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
74 	CU_ASSERT(pdu.data_iov[0].iov_len == 512);
75 
76 	/* 2nd case: simulate split on multiple SGL entries. */
77 	iov[0].iov_base = (void *)0xDEADBEEF;
78 	iov[0].iov_len = 4096;
79 	iov[1].iov_base = (void *)0xFEEDBEEF;
80 	iov[1].iov_len = 512 * 7;
81 	iov[2].iov_base = (void *)0xF00DF00D;
82 	iov[2].iov_len = 4096 * 2;
83 
84 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
85 
86 	CU_ASSERT(pdu.data_iovcnt == 1);
87 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
88 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
89 
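	/* Reading 2048 + 512 * 3 bytes at offset 2048 consumes the remaining
	 * 2048 bytes of iov[0] plus the first 512 * 3 bytes of iov[1].
	 */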
90 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
91 
92 	CU_ASSERT(pdu.data_iovcnt == 2);
93 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
94 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
95 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
96 	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
97 
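	/* The final read starts at offset 4096 + 512 * 3, i.e. past iov[0] and
	 * 512 * 3 bytes into iov[1], and covers the rest of iov[1] plus all of iov[2].
	 */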
98 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
99 
100 	CU_ASSERT(pdu.data_iovcnt == 2);
101 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
102 	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
103 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
104 	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
105 
106 	/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
107 	 * entries.
108 	 */
109 	data_len = 0;
110 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
111 		iov[i].iov_base = (void *)(0xDEADBEEF + i);
112 		iov[i].iov_len = 512 * (i + 1);
113 		data_len += 512 * (i + 1);
114 	}
115 
116 	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
117 
118 	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
119 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
120 		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
121 		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
122 	}
123 }
124 
125 static void
126 test_nvme_tcp_build_iovs(void)
127 {
128 	const uintptr_t pdu_iov_len = 4096;
129 	struct nvme_tcp_pdu pdu = {};
130 	struct iovec iovs[5] = {};
131 	uint32_t mapped_length = 0;
132 	int rc;
133 
134 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
135 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
136 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
137 			      SPDK_NVME_TCP_DIGEST_LEN;
138 	pdu.data_len = pdu_iov_len * 2;
139 	pdu.padding_len = 0;
140 
141 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
142 	pdu.data_iov[0].iov_len = pdu_iov_len;
143 	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
144 	pdu.data_iov[1].iov_len = pdu_iov_len;
145 	pdu.data_iovcnt = 2;
146 
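	/* With header and data digests enabled, expect 4 iovecs: the header plus
	 * its digest, the two data iovecs, and the data digest.
	 */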
147 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
148 	CU_ASSERT(rc == 4);
149 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
150 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
151 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
152 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
153 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
154 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
155 	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
156 	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
157 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
158 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
159 
160 	/* Add a new data_iov entry, update pdu iov count and data length */
161 	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
162 	pdu.data_iov[2].iov_len = 123;
163 	pdu.data_iovcnt = 3;
164 	pdu.data_len += 123;
165 	pdu.hdr.common.plen += 123;
166 
167 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
168 	CU_ASSERT(rc == 5);
169 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
170 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
171 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
172 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
173 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
174 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
175 	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
176 	CU_ASSERT(iovs[3].iov_len == 123);
177 	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
178 	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
179 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
180 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
181 }
182 
183 struct nvme_tcp_ut_bdev_io {
184 	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
185 	int iovpos;
186 };
187 
188 /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
189 static void
190 nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
191 {
192 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
193 	struct iovec *iov;
194 
195 	for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
196 		iov = &bio->iovs[bio->iovpos];
197 		/* Offset must be aligned with the start of any SGL entry */
198 		if (offset == 0) {
199 			break;
200 		}
201 
202 		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
203 		offset -= iov->iov_len;
204 	}
205 
206 	SPDK_CU_ASSERT_FATAL(offset == 0);
207 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
208 }
209 
210 static int
211 nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
212 {
213 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
214 	struct iovec *iov;
215 
216 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
217 
218 	iov = &bio->iovs[bio->iovpos];
219 
220 	*address = iov->iov_base;
221 	*length = iov->iov_len;
222 	bio->iovpos++;
223 
224 	return 0;
225 }
226 
227 static void
228 test_nvme_tcp_build_sgl_request(void)
229 {
230 	struct nvme_tcp_qpair tqpair;
231 	struct spdk_nvme_ctrlr ctrlr = {{0}};
232 	struct nvme_tcp_req tcp_req = {0};
233 	struct nvme_request req = {{0}};
234 	struct nvme_tcp_ut_bdev_io bio;
235 	uint64_t i;
236 	int rc;
237 
238 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
239 	tqpair.qpair.ctrlr = &ctrlr;
240 	tcp_req.req = &req;
241 
242 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
243 	req.qpair = &tqpair.qpair;
244 
245 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
246 		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
247 		bio.iovs[i].iov_len = 0;
248 	}
249 
250 	/* Test case 1: Single SGL. Expected: PASS */
251 	bio.iovpos = 0;
252 	req.payload_offset = 0;
253 	req.payload_size = 0x1000;
254 	bio.iovs[0].iov_len = 0x1000;
255 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
256 	SPDK_CU_ASSERT_FATAL(rc == 0);
257 	CU_ASSERT(bio.iovpos == 1);
258 	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
259 	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
260 	CU_ASSERT(tcp_req.iovcnt == 1);
261 
262 	/* Test case 2: Multiple SGL. Expected: PASS */
263 	bio.iovpos = 0;
264 	req.payload_offset = 0;
265 	req.payload_size = 0x4000;
266 	for (i = 0; i < 4; i++) {
267 		bio.iovs[i].iov_len = 0x1000;
268 	}
269 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
270 	SPDK_CU_ASSERT_FATAL(rc == 0);
271 	CU_ASSERT(bio.iovpos == 4);
272 	CU_ASSERT(tcp_req.iovcnt == 4);
273 	for (i = 0; i < 4; i++) {
274 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
275 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
276 	}
277 
278 	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
279 	bio.iovpos = 0;
280 	req.payload_offset = 0;
281 	req.payload_size = 0x17000;
282 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
283 		bio.iovs[i].iov_len = 0x1000;
284 	}
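	/* payload_size (0x17000) exceeds the total SGL capacity of
	 * NVME_TCP_MAX_SGL_DESCRIPTORS entries of 0x1000 bytes each, so the build
	 * is expected to fail after consuming every descriptor.
	 */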
285 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
286 	SPDK_CU_ASSERT_FATAL(rc != 0);
287 	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
288 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
289 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
290 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
291 	}
292 }
293 
294 static void
295 test_nvme_tcp_pdu_set_data_buf_with_md(void)
296 {
297 	struct nvme_tcp_pdu pdu = {};
298 	struct iovec iovs[7] = {};
299 	struct spdk_dif_ctx dif_ctx = {};
300 	int rc;
301 	struct spdk_dif_ctx_init_ext_opts dif_opts;
302 
303 	pdu.dif_ctx = &dif_ctx;
304 
305 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
306 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
307 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
308 			       0, 0, 0, 0, 0, &dif_opts);
309 	CU_ASSERT(rc == 0);
310 
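	/* The DIF context uses 520-byte extended blocks with 8 metadata bytes,
	 * i.e. 512 data bytes per block, so the data offsets/lengths passed in
	 * below are translated into buffer offsets/lengths that account for the
	 * interleaved metadata.
	 */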
311 	/* Single iovec case */
312 	iovs[0].iov_base = (void *)0xDEADBEEF;
313 	iovs[0].iov_len = 2080;
314 
315 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
316 
317 	CU_ASSERT(dif_ctx.data_offset == 0);
318 	CU_ASSERT(pdu.data_len == 500);
319 	CU_ASSERT(pdu.data_iovcnt == 1);
320 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
321 	CU_ASSERT(pdu.data_iov[0].iov_len == 500);
322 
323 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
324 
325 	CU_ASSERT(dif_ctx.data_offset == 500);
326 	CU_ASSERT(pdu.data_len == 1000);
327 	CU_ASSERT(pdu.data_iovcnt == 1);
328 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
329 	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
330 
331 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
332 
333 	CU_ASSERT(dif_ctx.data_offset == 1500);
334 	CU_ASSERT(pdu.data_len == 548);
335 	CU_ASSERT(pdu.data_iovcnt == 1);
336 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
337 	CU_ASSERT(pdu.data_iov[0].iov_len == 564);
338 
339 	/* Multiple iovecs case */
340 	iovs[0].iov_base = (void *)0xDEADBEEF;
341 	iovs[0].iov_len = 256;
342 	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
343 	iovs[1].iov_len = 256 + 1;
344 	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
345 	iovs[2].iov_len = 4;
346 	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
347 	iovs[3].iov_len = 3 + 123;
348 	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
349 	iovs[4].iov_len = 389 + 6;
350 	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
351 	iovs[5].iov_len = 2 + 512 + 8 + 432;
352 	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
353 	iovs[6].iov_len = 80 + 8;
354 
355 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
356 
357 	CU_ASSERT(dif_ctx.data_offset == 0);
358 	CU_ASSERT(pdu.data_len == 500);
359 	CU_ASSERT(pdu.data_iovcnt == 2);
360 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
361 	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
362 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
363 	CU_ASSERT(pdu.data_iov[1].iov_len == 244);
364 
365 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
366 
367 	CU_ASSERT(dif_ctx.data_offset == 500);
368 	CU_ASSERT(pdu.data_len == 1000);
369 	CU_ASSERT(pdu.data_iovcnt == 5);
370 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
371 	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
372 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
373 	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
374 	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
375 	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
376 	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
377 	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
378 	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
379 	CU_ASSERT(pdu.data_iov[4].iov_len == 478);
380 
381 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
382 
383 	CU_ASSERT(dif_ctx.data_offset == 1500);
384 	CU_ASSERT(pdu.data_len == 548);
385 	CU_ASSERT(pdu.data_iovcnt == 2);
386 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
387 	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
388 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
389 	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
390 }
391 
392 static void
393 test_nvme_tcp_build_iovs_with_md(void)
394 {
395 	struct nvme_tcp_pdu pdu = {};
396 	struct iovec iovs[11] = {};
397 	struct spdk_dif_ctx dif_ctx = {};
398 	uint32_t mapped_length = 0;
399 	int rc;
400 	struct spdk_dif_ctx_init_ext_opts dif_opts;
401 
402 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
403 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
404 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
405 			       0, 0, 0, 0, 0, &dif_opts);
406 	CU_ASSERT(rc == 0);
407 
408 	pdu.dif_ctx = &dif_ctx;
409 
410 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
411 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
412 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
413 			      SPDK_NVME_TCP_DIGEST_LEN;
414 	pdu.data_len = 512 * 8;
415 	pdu.padding_len = 0;
416 
417 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
418 	pdu.data_iov[0].iov_len = (512 + 8) * 8;
419 	pdu.data_iovcnt = 1;
420 
421 	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
422 	CU_ASSERT(rc == 10);
423 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
424 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
425 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
426 	CU_ASSERT(iovs[1].iov_len == 512);
427 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
428 	CU_ASSERT(iovs[2].iov_len == 512);
429 	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
430 	CU_ASSERT(iovs[3].iov_len == 512);
431 	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
432 	CU_ASSERT(iovs[4].iov_len == 512);
433 	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
434 	CU_ASSERT(iovs[5].iov_len == 512);
435 	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
436 	CU_ASSERT(iovs[6].iov_len == 512);
437 	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
438 	CU_ASSERT(iovs[7].iov_len == 512);
439 	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
440 	CU_ASSERT(iovs[8].iov_len == 512);
441 	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
442 	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
443 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
444 		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
445 }
446 
447 /* Completion callback stub; nothing to do */
448 static void
449 ut_nvme_complete_request(void *arg, const struct spdk_nvme_cpl *cpl)
450 {
451 	return;
452 }
453 
454 static void
455 test_nvme_tcp_req_complete_safe(void)
456 {
457 	bool rc;
458 	struct nvme_tcp_req	tcp_req = {0};
459 	struct nvme_request	req = {{0}};
460 	struct nvme_tcp_qpair	tqpair = {{0}};
461 
462 	tcp_req.req = &req;
463 	tcp_req.req->qpair = &tqpair.qpair;
464 	tcp_req.req->cb_fn = ut_nvme_complete_request;
465 	tcp_req.tqpair = &tqpair;
466 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
467 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
468 	tqpair.qpair.num_outstanding_reqs = 1;
469 
470 	/* Test case 1: send operation and transfer completed. Expect: PASS */
471 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
472 	tcp_req.ordering.bits.send_ack = 1;
473 	tcp_req.ordering.bits.data_recv = 1;
474 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
475 
476 	rc = nvme_tcp_req_complete_safe(&tcp_req);
477 	CU_ASSERT(rc == true);
478 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
479 
480 	/* Test case 2: send operation not completed. Expect: FAIL */
481 	tcp_req.ordering.raw = 0;
482 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
483 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
484 	tqpair.qpair.num_outstanding_reqs = 1;
485 
486 	rc = nvme_tcp_req_complete_safe(&tcp_req);
487 	SPDK_CU_ASSERT_FATAL(rc != true);
488 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
489 	TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
490 
491 	/* Test case 3: in completion context. Expect: PASS */
492 	tqpair.qpair.in_completion_context = 1;
493 	tqpair.async_complete = 0;
494 	tcp_req.ordering.bits.send_ack = 1;
495 	tcp_req.ordering.bits.data_recv = 1;
496 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
497 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
498 	tqpair.qpair.num_outstanding_reqs = 1;
499 
500 	rc = nvme_tcp_req_complete_safe(&tcp_req);
501 	CU_ASSERT(rc == true);
502 	CU_ASSERT(tcp_req.tqpair->async_complete == 0);
503 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
504 
505 	/* Test case 4: in async complete. Expect: PASS */
506 	tqpair.qpair.in_completion_context = 0;
507 	tcp_req.ordering.bits.send_ack = 1;
508 	tcp_req.ordering.bits.data_recv = 1;
509 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
510 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
511 	tqpair.qpair.num_outstanding_reqs = 1;
512 
513 	rc = nvme_tcp_req_complete_safe(&tcp_req);
514 	CU_ASSERT(rc == true);
515 	CU_ASSERT(tcp_req.tqpair->async_complete);
516 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
517 }
518 
519 static void
520 test_nvme_tcp_req_init(void)
521 {
522 	struct nvme_tcp_qpair tqpair = {};
523 	struct nvme_request req = {};
524 	struct nvme_tcp_req tcp_req = {0};
525 	struct spdk_nvme_ctrlr ctrlr = {{0}};
526 	struct nvme_tcp_ut_bdev_io bio = {};
527 	int rc;
528 
529 	tqpair.qpair.ctrlr = &ctrlr;
530 	req.qpair = &tqpair.qpair;
531 
532 	tcp_req.cid = 1;
533 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
534 	req.payload_offset = 0;
535 	req.payload_size = 4096;
536 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
537 	ctrlr.ioccsz_bytes = 1024;
538 	bio.iovpos = 0;
539 	bio.iovs[0].iov_len = 8192;
540 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
541 
542 	/* Test case1: payload type SGL. Expect: PASS */
543 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
544 	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
545 
546 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
547 	CU_ASSERT(rc == 0);
548 	CU_ASSERT(tcp_req.req == &req);
549 	CU_ASSERT(tcp_req.in_capsule_data == true);
550 	CU_ASSERT(tcp_req.iovcnt == 1);
551 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
552 	CU_ASSERT(tcp_req.iov[0].iov_base == bio.iovs[0].iov_base);
553 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
554 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
555 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
556 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
557 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
558 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
559 
560 	/* Test case2: payload type CONTIG. Expect: PASS */
561 	memset(&req.cmd, 0, sizeof(req.cmd));
562 	memset(&tcp_req, 0, sizeof(tcp_req));
563 	tcp_req.cid = 1;
564 	req.payload = NVME_PAYLOAD_CONTIG(&bio, NULL);
565 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
566 
567 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
568 	CU_ASSERT(rc == 0);
569 	CU_ASSERT(tcp_req.req == &req);
570 	CU_ASSERT(tcp_req.in_capsule_data == true);
571 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
572 	CU_ASSERT(tcp_req.iov[0].iov_base == &bio);
573 	CU_ASSERT(tcp_req.iovcnt == 1);
574 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
575 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
576 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
577 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
578 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
579 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
580 
581 }
582 
583 static void
584 test_nvme_tcp_req_get(void)
585 {
586 	struct nvme_tcp_req tcp_req = {0};
587 	struct nvme_tcp_qpair tqpair = {};
588 	struct nvme_tcp_pdu send_pdu = {};
589 
590 	tcp_req.pdu = &send_pdu;
591 	tcp_req.state = NVME_TCP_REQ_FREE;
592 
593 	TAILQ_INIT(&tqpair.free_reqs);
594 	TAILQ_INIT(&tqpair.outstanding_reqs);
595 	TAILQ_INSERT_HEAD(&tqpair.free_reqs, &tcp_req, link);
596 
597 	CU_ASSERT(nvme_tcp_req_get(&tqpair) == &tcp_req);
598 	CU_ASSERT(tcp_req.state == NVME_TCP_REQ_ACTIVE);
599 	CU_ASSERT(tcp_req.datao == 0);
600 	CU_ASSERT(tcp_req.req == NULL);
601 	CU_ASSERT(tcp_req.in_capsule_data == false);
602 	CU_ASSERT(tcp_req.r2tl_remain == 0);
603 	CU_ASSERT(tcp_req.iovcnt == 0);
604 	CU_ASSERT(tcp_req.ordering.raw == 0);
605 	/* outstanding_reqs should still be empty - caller is responsible
606 	 * for putting it on the TAILQ after any other initialization is
607 	 * completed.
608 	 */
609 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
610 	CU_ASSERT(TAILQ_EMPTY(&tqpair.free_reqs));
611 
612 	/* No tcp request available, expect fail */
613 	SPDK_CU_ASSERT_FATAL(nvme_tcp_req_get(&tqpair) == NULL);
614 }
615 
616 static void
617 test_nvme_tcp_qpair_capsule_cmd_send(void)
618 {
619 	struct nvme_tcp_qpair tqpair = {};
620 	struct spdk_nvme_tcp_stat stats = {};
621 	struct nvme_tcp_req tcp_req = {};
622 	struct nvme_tcp_pdu pdu = {};
623 	struct nvme_request req = {};
624 	char iov_base0[4096];
625 	char iov_base1[4096];
626 	uint32_t plen;
627 	uint8_t pdo;
628 
629 	memset(iov_base0, 0xFF, 4096);
630 	memset(iov_base1, 0xFF, 4096);
631 	tcp_req.req = &req;
632 	tcp_req.pdu = &pdu;
633 	TAILQ_INIT(&tqpair.send_queue);
634 	tqpair.stats = &stats;
635 
636 	tcp_req.iov[0].iov_base = (void *)iov_base0;
637 	tcp_req.iov[0].iov_len = 4096;
638 	tcp_req.iov[1].iov_base = (void *)iov_base1;
639 	tcp_req.iov[1].iov_len = 4096;
640 	tcp_req.iovcnt = 2;
641 	tcp_req.req->payload_size = 8192;
642 	tcp_req.in_capsule_data = true;
643 	tqpair.cpda = NVME_TCP_HPDA_DEFAULT;
644 
645 	/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
646 	tqpair.flags.host_hdgst_enable = 1;
647 	tqpair.flags.host_ddgst_enable = 1;
648 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd) +
649 		     SPDK_NVME_TCP_DIGEST_LEN;
650 	plen += tcp_req.req->payload_size;
651 	plen += SPDK_NVME_TCP_DIGEST_LEN;
652 
653 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
654 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
655 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
656 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
657 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
658 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
659 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
660 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
661 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
662 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
663 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
664 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
665 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
666 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
667 
668 	/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
669 	memset(&pdu, 0, sizeof(pdu));
670 	tqpair.flags.host_hdgst_enable = 0;
671 	tqpair.flags.host_ddgst_enable = 0;
672 
673 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd);
674 	plen += tcp_req.req->payload_size;
675 
676 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
677 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
678 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags == 0);
679 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
680 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
681 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
682 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
683 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
684 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
685 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
686 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
687 
688 	/* Test case 3: padding available. Expect: PASS */
689 	memset(&pdu, 0, sizeof(pdu));
690 	tqpair.flags.host_hdgst_enable = 1;
691 	tqpair.flags.host_ddgst_enable = 1;
692 	tqpair.cpda = SPDK_NVME_TCP_CPDA_MAX;
693 
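	/* cpda is expressed in 4-byte units, so with SPDK_NVME_TCP_CPDA_MAX the
	 * data offset (pdo) is padded out to (SPDK_NVME_TCP_CPDA_MAX + 1) * 4 bytes.
	 */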
694 	pdo = plen = (SPDK_NVME_TCP_CPDA_MAX + 1) << 2;
695 	plen += tcp_req.req->payload_size;
696 	plen += SPDK_NVME_TCP_DIGEST_LEN;
697 
698 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
699 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
700 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
701 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
702 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
703 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
704 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
705 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
706 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
707 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
708 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
709 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
710 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
711 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
712 }
713 
714 /* Transfer-complete callback stub; nothing to do */
715 static void
716 ut_nvme_tcp_qpair_xfer_complete_cb(void *cb_arg)
717 {
718 	return;
719 }
720 
721 static void
722 test_nvme_tcp_qpair_write_pdu(void)
723 {
724 	struct nvme_tcp_qpair tqpair = {};
725 	struct spdk_nvme_tcp_stat stats = {};
726 	struct nvme_request req = {};
727 	struct nvme_tcp_req treq = { .req = &req };
728 	struct nvme_tcp_pdu pdu = { .req = &treq };
729 	void *cb_arg = (void *)0xDEADBEEF;
730 	char iov_base0[4096];
731 	char iov_base1[4096];
732 
733 	memset(iov_base0, 0xFF, 4096);
734 	memset(iov_base1, 0xFF, 4096);
735 	pdu.data_len = 4096 * 2;
736 	pdu.padding_len = 0;
737 	pdu.data_iov[0].iov_base = (void *)iov_base0;
738 	pdu.data_iov[0].iov_len = 4096;
739 	pdu.data_iov[1].iov_base = (void *)iov_base1;
740 	pdu.data_iov[1].iov_len = 4096;
741 	pdu.data_iovcnt = 2;
742 	TAILQ_INIT(&tqpair.send_queue);
743 
744 	/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
745 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
746 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
747 
748 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
749 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
750 	pdu.hdr.common.plen = pdu.hdr.common.hlen +
751 			      SPDK_NVME_TCP_DIGEST_LEN * 2;
752 	pdu.hdr.common.plen += pdu.data_len;
753 	tqpair.flags.host_hdgst_enable = 1;
754 	tqpair.flags.host_ddgst_enable = 1;
755 	tqpair.stats = &stats;
756 
757 	nvme_tcp_qpair_write_pdu(&tqpair,
758 				 &pdu,
759 				 ut_nvme_tcp_qpair_xfer_complete_cb,
760 				 cb_arg);
761 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
762 	/* Check the crc data of header digest filled into raw */
763 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen]);
764 	CU_ASSERT(pdu.data_digest[0]);
765 	CU_ASSERT(pdu.sock_req.iovcnt == 4);
766 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
767 	CU_ASSERT(pdu.iov[0].iov_len == (sizeof(struct spdk_nvme_tcp_cmd) +
768 					 SPDK_NVME_TCP_DIGEST_LEN));
769 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
770 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
771 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
772 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
773 	CU_ASSERT(pdu.iov[3].iov_base == &pdu.data_digest);
774 	CU_ASSERT(pdu.iov[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
775 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
776 	CU_ASSERT(pdu.cb_arg == cb_arg);
777 	CU_ASSERT(pdu.qpair == &tqpair);
778 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
779 
780 	/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
781 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
782 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
783 
784 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
785 	pdu.hdr.common.plen = pdu.hdr.common.hlen + pdu.data_len;
786 	tqpair.flags.host_hdgst_enable = 0;
787 	tqpair.flags.host_ddgst_enable = 0;
788 
789 	nvme_tcp_qpair_write_pdu(&tqpair,
790 				 &pdu,
791 				 ut_nvme_tcp_qpair_xfer_complete_cb,
792 				 cb_arg);
793 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
794 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen] == 0);
795 	CU_ASSERT(pdu.data_digest[0] == 0);
796 	CU_ASSERT(pdu.sock_req.iovcnt == 3);
797 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
798 	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd));
799 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
800 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
801 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
802 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
803 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
804 	CU_ASSERT(pdu.cb_arg == cb_arg);
805 	CU_ASSERT(pdu.qpair == &tqpair);
806 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
807 }
808 
809 static void
810 test_nvme_tcp_qpair_set_recv_state(void)
811 {
812 	struct nvme_tcp_qpair tqpair = {};
813 
814 	/* case 1: The recv state of tqpair is the same as the state to be set */
815 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
816 	nvme_tcp_qpair_set_recv_state(&tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
817 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
818 
819 	/* Different state will be set accordingly */
820 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY;
821 	nvme_tcp_qpair_set_recv_state(&tqpair, 0xff);
822 	CU_ASSERT(tqpair.recv_state == 0xff);
823 }
824 
825 static void
826 test_nvme_tcp_alloc_reqs(void)
827 {
828 	struct nvme_tcp_qpair tqpair = {};
829 	int rc = 0;
830 
831 	/* case1: single entry. Expect: PASS */
832 	tqpair.num_entries = 1;
833 	rc = nvme_tcp_alloc_reqs(&tqpair);
834 	CU_ASSERT(rc == 0);
835 	CU_ASSERT(tqpair.tcp_reqs[0].cid == 0);
836 	CU_ASSERT(tqpair.tcp_reqs[0].tqpair == &tqpair);
837 	CU_ASSERT(tqpair.tcp_reqs[0].pdu == &tqpair.send_pdus[0]);
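	/* One extra PDU is allocated at the end of send_pdus and reserved for the
	 * qpair's own send_pdu.
	 */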
838 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
839 	free(tqpair.tcp_reqs);
840 	spdk_free(tqpair.send_pdus);
841 
842 	/* case2: multiple entries. Expect: PASS */
843 	tqpair.num_entries = 5;
844 	rc = nvme_tcp_alloc_reqs(&tqpair);
845 	CU_ASSERT(rc == 0);
846 	for (int i = 0; i < tqpair.num_entries; i++) {
847 		CU_ASSERT(tqpair.tcp_reqs[i].cid == i);
848 		CU_ASSERT(tqpair.tcp_reqs[i].tqpair == &tqpair);
849 		CU_ASSERT(tqpair.tcp_reqs[i].pdu == &tqpair.send_pdus[i]);
850 	}
851 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
852 
853 	/* case 3: Test nvme_tcp_free_reqs. Expect: PASS */
854 	nvme_tcp_free_reqs(&tqpair);
855 	CU_ASSERT(tqpair.tcp_reqs == NULL);
856 	CU_ASSERT(tqpair.send_pdus == NULL);
857 }
858 
859 static void
860 test_nvme_tcp_parse_addr(void)
861 {
862 	struct sockaddr_storage dst_addr;
863 	int rc = 0;
864 
865 	memset(&dst_addr, 0, sizeof(dst_addr));
866 	/* case1: getaddrinfo failed */
867 	rc = nvme_tcp_parse_addr(&dst_addr, AF_INET, NULL, NULL);
868 	CU_ASSERT(rc != 0);
869 
870 	/* case2: res->ai_addrlen < sizeof(*sa). Expect: Pass. */
871 	rc = nvme_tcp_parse_addr(&dst_addr, AF_INET, "12.34.56.78", "23");
872 	CU_ASSERT(rc == 0);
873 	CU_ASSERT(dst_addr.ss_family == AF_INET);
874 }
875 
876 static void
877 test_nvme_tcp_qpair_send_h2c_term_req(void)
878 {
879 	struct nvme_tcp_qpair tqpair = {};
880 	struct spdk_nvme_tcp_stat stats = {};
881 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
882 	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
883 	uint32_t error_offset = 1;
884 
885 	tqpair.send_pdu = &send_pdu;
886 	tqpair.recv_pdu = &recv_pdu;
887 	tqpair.stats = &stats;
888 	TAILQ_INIT(&tqpair.send_queue);
889 	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
890 	pdu.hdr.common.hlen = 64;
891 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
892 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
893 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
894 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
895 		  pdu.hdr.common.hlen);
896 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
897 
898 	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
899 	pdu.hdr.common.hlen = 255;
900 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
901 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
902 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
903 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == (unsigned)
904 		  tqpair.send_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
905 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
906 }
907 
908 static void
909 test_nvme_tcp_pdu_ch_handle(void)
910 {
911 	struct nvme_tcp_qpair tqpair = {};
912 	struct spdk_nvme_tcp_stat stats = {};
913 	struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};
914 
915 	tqpair.send_pdu = &send_pdu;
916 	tqpair.recv_pdu = &recv_pdu;
917 	tqpair.stats = &stats;
918 	TAILQ_INIT(&tqpair.send_queue);
919 	/* case 1: Already received IC_RESP PDU. Expect: fail */
920 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
921 	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
922 	nvme_tcp_pdu_ch_handle(&tqpair);
923 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
924 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
925 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
926 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
927 
928 	/* case 2: Expected and received PDU header lengths differ. Expect: fail */
929 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
930 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
931 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
932 	tqpair.recv_pdu->hdr.common.hlen = 0;
933 	nvme_tcp_pdu_ch_handle(&tqpair);
934 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
935 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
936 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
937 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
938 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);
939 
940 	/* case 3: The TCP/IP tqpair connection is not negotiated. Expect: fail */
941 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
942 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
943 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
944 	tqpair.recv_pdu->hdr.common.hlen = 0;
945 	nvme_tcp_pdu_ch_handle(&tqpair);
946 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
947 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
948 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
949 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
950 
951 	/* case 4: Unexpected PDU type. Expect: fail */
952 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
953 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
954 	tqpair.recv_pdu->hdr.common.plen = 0;
955 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
956 	nvme_tcp_pdu_ch_handle(&tqpair);
957 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
958 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
959 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
960 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
961 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
962 
963 	/* case 5: plen error. Expect: fail */
964 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
965 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
966 	tqpair.recv_pdu->hdr.common.plen = 0;
967 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
968 	nvme_tcp_pdu_ch_handle(&tqpair);
969 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
970 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
971 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
972 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
973 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
974 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
975 
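	/* The following sub-cases exercise the plen validation for each accepted
	 * PDU type (CAPSULE_RESP, C2H_DATA, C2H_TERM_REQ, R2T); each one is
	 * expected to trigger an H2C term req with fei pointing at the plen field.
	 */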
976 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
977 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
978 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
979 	tqpair.recv_pdu->hdr.common.plen = 0;
980 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
981 	nvme_tcp_pdu_ch_handle(&tqpair);
982 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
983 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
984 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
985 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
986 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
987 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
988 
989 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
990 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
991 	tqpair.recv_pdu->hdr.common.plen = 0;
992 	tqpair.recv_pdu->hdr.common.pdo = 64;
993 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
994 	nvme_tcp_pdu_ch_handle(&tqpair);
995 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
996 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
997 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
998 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
999 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1000 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1001 
1002 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1003 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1004 	tqpair.recv_pdu->hdr.common.plen = 0;
1005 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
1006 	nvme_tcp_pdu_ch_handle(&tqpair);
1007 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1008 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1009 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1010 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1011 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1012 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1013 
1014 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
1015 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1016 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1017 	tqpair.recv_pdu->hdr.common.plen = 0;
1018 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
1019 	nvme_tcp_pdu_ch_handle(&tqpair);
1020 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1021 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1022 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1023 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1024 		  (unsigned)sizeof(struct spdk_nvme_tcp_r2t_hdr));
1025 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1026 
1027 	/* case 6: Valid IC_RESP header. Expect: PASS */
1028 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1029 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
1030 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1031 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1032 	nvme_tcp_pdu_ch_handle(&tqpair);
1033 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1034 	CU_ASSERT(tqpair.recv_pdu->psh_len == tqpair.recv_pdu->hdr.common.hlen - sizeof(
1035 			  struct spdk_nvme_tcp_common_pdu_hdr));
1036 }
1037 
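/* Mocked spdk_sock_connect_ext(): verifies the address, port and socket options
 * that nvme_tcp_qpair_connect_sock() derives from the transport ID set up in
 * test_nvme_tcp_qpair_connect_sock() below.
 */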
1038 DEFINE_RETURN_MOCK(spdk_sock_connect_ext, struct spdk_sock *);
1039 struct spdk_sock *
1040 spdk_sock_connect_ext(const char *ip, int port,
1041 		      const char *_impl_name, struct spdk_sock_opts *opts)
1042 {
1043 	HANDLE_RETURN_MOCK(spdk_sock_connect_ext);
1044 	CU_ASSERT(port == 23);
1045 	CU_ASSERT(opts->opts_size == sizeof(*opts));
1046 	CU_ASSERT(opts->priority == 1);
1047 	CU_ASSERT(opts->zcopy == true);
1048 	CU_ASSERT(!strcmp(ip, "192.168.1.78"));
1049 	return (struct spdk_sock *)0xDDADBEEF;
1050 }
1051 
1052 static void
1053 test_nvme_tcp_qpair_connect_sock(void)
1054 {
1055 	struct nvme_tcp_ctrlr tctrlr = {};
1056 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1057 	struct nvme_tcp_qpair tqpair = {};
1058 	int rc;
1059 
1060 	tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1061 	tqpair.qpair.id = 1;
1062 	tqpair.qpair.poll_group = (void *)0xDEADBEEF;
1063 	ctrlr->trid.priority = 1;
1064 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1065 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1066 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1067 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1068 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1069 
1070 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1071 	CU_ASSERT(rc == 0);
1072 
1073 	/* Unsupported family of the transport address */
1074 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IB;
1075 
1076 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1077 	SPDK_CU_ASSERT_FATAL(rc == -1);
1078 
1079 	/* Invalid dst_port: 2147483647 (INT_MAX) is rejected */
1080 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1081 	memcpy(ctrlr->trid.trsvcid, "2147483647", sizeof("2147483647"));
1082 
1083 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1084 	SPDK_CU_ASSERT_FATAL(rc == -1);
1085 
1086 	/* Parse invalid address */
1087 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1088 	memcpy(ctrlr->trid.traddr, "192.168.1.256", sizeof("192.168.1.256"));
1089 
1090 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1091 	SPDK_CU_ASSERT_FATAL(rc != 0);
1092 }
1093 
1094 static void
1095 test_nvme_tcp_qpair_icreq_send(void)
1096 {
1097 	struct nvme_tcp_qpair tqpair = {};
1098 	struct spdk_nvme_tcp_stat stats = {};
1099 	struct spdk_nvme_ctrlr ctrlr = {};
1100 	struct nvme_tcp_pdu pdu = {};
1101 	struct nvme_tcp_poll_group poll_group = {};
1102 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1103 	int rc;
1104 
1105 	tqpair.send_pdu = &pdu;
1106 	tqpair.qpair.ctrlr = &ctrlr;
1107 	tqpair.qpair.poll_group = &poll_group.group;
1108 	tqpair.stats = &stats;
1109 	ic_req = &pdu.hdr.ic_req;
1110 
1111 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1112 	tqpair.qpair.ctrlr->opts.header_digest = true;
1113 	tqpair.qpair.ctrlr->opts.data_digest = true;
1114 	TAILQ_INIT(&tqpair.send_queue);
1115 
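	/* The icreq is expected to advertise a zero-based maxr2t
	 * (NVME_TCP_MAX_R2T_DEFAULT - 1) and to request both digests, mirroring
	 * the controller opts configured above.
	 */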
1116 	rc = nvme_tcp_qpair_icreq_send(&tqpair);
1117 	CU_ASSERT(rc == 0);
1118 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1119 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1120 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1121 	CU_ASSERT(ic_req->pfv == 0);
1122 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1123 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1124 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1125 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1126 }
1127 
1128 static void
1129 test_nvme_tcp_c2h_payload_handle(void)
1130 {
1131 	struct nvme_tcp_qpair tqpair = {};
1132 	struct spdk_nvme_tcp_stat stats = {};
1133 	struct nvme_tcp_pdu pdu = {};
1134 	struct nvme_tcp_req tcp_req = {};
1135 	struct nvme_request	req = {};
1136 	struct nvme_tcp_pdu recv_pdu = {};
1137 	uint32_t reaped = 1;
1138 
1139 	tcp_req.req = &req;
1140 	tcp_req.req->qpair = &tqpair.qpair;
1141 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1142 	tcp_req.tqpair = &tqpair;
1143 	tcp_req.cid = 1;
1144 	tqpair.stats = &stats;
1145 
1146 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1147 
1148 	pdu.req = &tcp_req;
1149 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1150 					SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1151 	pdu.data_len = 1024;
1152 
1153 	tqpair.qpair.id = 1;
1154 	tqpair.recv_pdu = &recv_pdu;
1155 
1156 	/* case 1: nvme_tcp_c2h_data_payload_handle: tcp_req->datao != tcp_req->req->payload_size */
1157 	tcp_req.datao = 1024;
1158 	tcp_req.req->payload_size = 2048;
1159 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1160 	tcp_req.ordering.bits.send_ack = 1;
1161 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1162 	tcp_req.ordering.bits.data_recv = 0;
1163 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1164 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1165 	tqpair.qpair.num_outstanding_reqs = 1;
1166 
1167 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1168 
1169 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1170 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1171 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1172 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1173 	CU_ASSERT(reaped == 2);
1174 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1175 
1176 	/* case 2: nvme_tcp_c2h_data_payload_handle: tcp_req->datao == tcp_req->req->payload_size */
1177 	tcp_req.datao = 1024;
1178 	tcp_req.req->payload_size = 1024;
1179 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1180 	tcp_req.ordering.bits.send_ack = 1;
1181 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1182 	tcp_req.ordering.bits.data_recv = 0;
1183 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1184 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1185 	tqpair.qpair.num_outstanding_reqs = 1;
1186 
1187 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1188 
1189 	CU_ASSERT(tcp_req.rsp.status.p == 1);
1190 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1191 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1192 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1193 	CU_ASSERT(reaped == 3);
1194 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1195 
1196 	/* case 3: nvme_tcp_c2h_data_payload_handle: flag does not have SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS */
1197 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1198 	tcp_req.datao = 1024;
1199 	tcp_req.req->payload_size = 1024;
1200 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1201 	tcp_req.ordering.bits.send_ack = 1;
1202 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1203 	tcp_req.ordering.bits.data_recv = 0;
1204 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1205 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1206 	tqpair.qpair.num_outstanding_reqs = 1;
1207 
1208 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1209 
1210 	CU_ASSERT(reaped == 3);
1211 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1212 
1213 	/* case 4: nvme_tcp_c2h_term_req_payload_handle: recv_state is NVME_TCP_PDU_RECV_STATE_ERROR */
1214 	pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1215 	nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);
1216 
1217 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1218 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1219 }
1220 
1221 static void
1222 test_nvme_tcp_icresp_handle(void)
1223 {
1224 	struct nvme_tcp_qpair tqpair = {};
1225 	struct spdk_nvme_tcp_stat stats = {};
1226 	struct nvme_tcp_pdu pdu = {};
1227 	struct nvme_tcp_pdu send_pdu = {};
1228 	struct nvme_tcp_pdu recv_pdu = {};
1229 
1230 	tqpair.send_pdu = &send_pdu;
1231 	tqpair.recv_pdu = &recv_pdu;
1232 	tqpair.stats = &stats;
1233 	TAILQ_INIT(&tqpair.send_queue);
1234 
1235 	/* case 1: Received ICResp PFV differs from the expected value. */
1236 	pdu.hdr.ic_resp.pfv = 1;
1237 
1238 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1239 
1240 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1241 
1242 	/* case 2: Received ICResp maxh2cdata differs from the expected value. */
1243 	pdu.hdr.ic_resp.pfv = 0;
1244 	pdu.hdr.ic_resp.maxh2cdata = 2048;
1245 
1246 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1247 
1248 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1249 
1250 	/* case 3: Received ICResp cpda differs from the expected value. */
1251 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1252 	pdu.hdr.ic_resp.cpda = 64;
1253 
1254 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1255 
1256 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1257 
1258 	/* case 4: still waiting for the icreq ack. */
1259 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1260 	pdu.hdr.ic_resp.cpda = 30;
1261 	pdu.hdr.ic_resp.dgst.bits.hdgst_enable = true;
1262 	pdu.hdr.ic_resp.dgst.bits.ddgst_enable = true;
1263 	tqpair.flags.icreq_send_ack = 0;
1264 
1265 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1266 
1267 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1268 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_INITIALIZING);
1269 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1270 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1271 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1272 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1273 
1274 	/* case 5: Expect: PASS. */
1275 	tqpair.flags.icreq_send_ack = 1;
1276 
1277 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1278 
1279 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1280 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND);
1281 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1282 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1283 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1284 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1285 }
1286 
1287 static void
1288 test_nvme_tcp_pdu_payload_handle(void)
1289 {
1290 	struct nvme_tcp_qpair	tqpair = {};
1291 	struct spdk_nvme_tcp_stat	stats = {};
1292 	struct nvme_tcp_pdu	recv_pdu = {};
1293 	struct nvme_tcp_req	tcp_req = {};
1294 	struct nvme_request	req = {};
1295 	uint32_t		reaped = 0;
1296 
1297 	tqpair.recv_pdu = &recv_pdu;
1298 	tcp_req.tqpair = &tqpair;
1299 	tcp_req.req = &req;
1300 	tcp_req.req->qpair = &tqpair.qpair;
1301 	tqpair.stats = &stats;
1302 
1303 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1304 	tqpair.qpair.id = 1;
1305 	recv_pdu.ddgst_enable = false;
1306 	recv_pdu.req = &tcp_req;
1307 	recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1308 					     SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1309 	recv_pdu.data_len = 1024;
1310 	tcp_req.ordering.bits.data_recv = 0;
1311 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1312 	tcp_req.cid = 1;
1313 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1314 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1315 	tqpair.qpair.num_outstanding_reqs = 1;
1316 
1317 	/* C2H_DATA */
1318 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
1319 	tcp_req.datao = 1024;
1320 	tcp_req.req->payload_size = 2048;
1321 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1322 	tcp_req.ordering.bits.send_ack = 1;
1323 
1324 	recv_pdu.req = &tcp_req;
1325 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1326 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1327 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1328 	CU_ASSERT(tcp_req.rsp.cid == 1);
1329 	CU_ASSERT(tcp_req.rsp.sqid == 1);
1330 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1331 	CU_ASSERT(reaped == 1);
1332 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1333 
1334 	/* TermResp */
1335 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1336 	recv_pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1337 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1338 
1339 	recv_pdu.req = &tcp_req;
1340 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1341 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1342 }
1343 
1344 static void
1345 test_nvme_tcp_capsule_resp_hdr_handle(void)
1346 {
1347 	struct nvme_tcp_qpair	tqpair = {};
1348 	struct spdk_nvme_ctrlr	ctrlr = {};
1349 	struct spdk_nvme_tcp_stat	stats = {};
1350 	struct nvme_request	req = {};
1351 	struct spdk_nvme_cpl	rccqe_tgt = {};
1352 	struct nvme_tcp_req	*tcp_req = NULL;
1353 	uint32_t		reaped = 0;
1354 	int			rc;
1355 
1356 	/* Initialize requests and pdus */
1357 	tqpair.num_entries = 1;
1358 	tqpair.stats = &stats;
1359 	req.qpair = &tqpair.qpair;
1360 	req.qpair->ctrlr = &ctrlr;
1361 	req.payload = NVME_PAYLOAD_CONTIG(NULL, NULL);
1362 
1363 	rc = nvme_tcp_alloc_reqs(&tqpair);
1364 	SPDK_CU_ASSERT_FATAL(rc == 0);
1365 	tcp_req = nvme_tcp_req_get(&tqpair);
1366 	SPDK_CU_ASSERT_FATAL(tcp_req != NULL);
1367 	rc = nvme_tcp_req_init(&tqpair, &req, tcp_req);
1368 	SPDK_CU_ASSERT_FATAL(rc == 0);
1369 	tcp_req->ordering.bits.send_ack = 1;
1370 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1371 	/* tqpair.recv_pdu will be reset after handling */
1372 	memset(&rccqe_tgt, 0xff, sizeof(rccqe_tgt));
1373 	rccqe_tgt.cid = 0;
1374 	memcpy(&tqpair.recv_pdu->hdr.capsule_resp.rccqe, &rccqe_tgt, sizeof(rccqe_tgt));
1375 	tqpair.qpair.num_outstanding_reqs = 1;
1376 
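	/* cid 0 matches the single allocated tcp_req, so the completion should be copied
	 * into tcp_req->rsp and the request reaped. */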
1377 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1378 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1379 	CU_ASSERT(!memcmp(&tcp_req->rsp, &rccqe_tgt, sizeof(rccqe_tgt)));
1380 	CU_ASSERT(tcp_req->ordering.bits.data_recv == 1);
1381 	CU_ASSERT(reaped == 1);
1382 	CU_ASSERT(TAILQ_EMPTY(&tcp_req->tqpair->outstanding_reqs));
1383 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1384 
1385 	/* No tcp request matches the cid, expect failure and the qpair to quiesce */
1386 	reaped = 0;
1387 	tqpair.recv_pdu->hdr.capsule_resp.rccqe.cid = 1;
1388 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1389 
1390 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1391 	CU_ASSERT(reaped == 0);
1392 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1393 	nvme_tcp_free_reqs(&tqpair);
1394 }
1395 
1396 static void
1397 test_nvme_tcp_ctrlr_connect_qpair(void)
1398 {
1399 	struct spdk_nvme_ctrlr ctrlr = {};
1400 	struct spdk_nvme_qpair *qpair;
1401 	struct nvme_tcp_qpair *tqpair;
1402 	struct nvme_tcp_pdu pdu = {};
1403 	struct nvme_tcp_pdu recv_pdu = {};
1404 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1405 	int rc;
1406 
1407 	tqpair = calloc(1, sizeof(*tqpair));
1408 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1409 	tqpair->recv_pdu = &recv_pdu;
1410 	qpair = &tqpair->qpair;
1411 	tqpair->sock = (struct spdk_sock *)0xDEADBEEF;
1412 	tqpair->send_pdu = &pdu;
1413 	tqpair->qpair.ctrlr = &ctrlr;
1414 	tqpair->qpair.state = NVME_QPAIR_CONNECTING;
1415 	tqpair->num_entries = 128;
1416 	ic_req = &pdu.hdr.ic_req;
1417 
1418 	tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1419 	tqpair->recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1420 	tqpair->recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1421 	tqpair->recv_pdu->ch_valid_bytes = sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1422 	tqpair->recv_pdu->psh_valid_bytes = tqpair->recv_pdu->hdr.common.hlen -
1423 					    sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1424 	tqpair->recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
1425 	tqpair->recv_pdu->hdr.ic_resp.cpda = 1;
1426 	tqpair->flags.icreq_send_ack = 1;
1427 	tqpair->qpair.ctrlr->opts.header_digest = true;
1428 	tqpair->qpair.ctrlr->opts.data_digest = true;
1429 	TAILQ_INIT(&tqpair->send_queue);
1430 
1431 	rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
1432 	CU_ASSERT(rc == 0);
1433 
1434 	/* Skip the NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY state and assume that
1435 	 * the icresp has already been received. */
1436 	tqpair->recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1437 
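	/* Poll the qpair until the connect state machine consumes the pre-staged icresp
	 * and the qpair leaves the CONNECTING state. */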
1438 	while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
1439 		rc = nvme_tcp_qpair_process_completions(qpair, 0);
1440 		CU_ASSERT(rc >= 0);
1441 	}
1442 
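	/* Once connected, the icreq that was sent should use the 0-based
	 * NVME_TCP_MAX_R2T_DEFAULT, the default HPDA, and the digest settings taken from
	 * the ctrlr opts. */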
1443 	CU_ASSERT(tqpair->maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
1444 	CU_ASSERT(tqpair->state == NVME_TCP_QPAIR_STATE_RUNNING);
1445 	CU_ASSERT(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1446 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1447 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1448 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1449 	CU_ASSERT(ic_req->pfv == 0);
1450 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1451 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1452 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1453 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1454 
1455 	nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1456 }
1457 
1458 static void
1459 ut_disconnect_qpair_req_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
1460 {
1461 	CU_ASSERT_EQUAL(cpl->status.sc, SPDK_NVME_SC_ABORTED_SQ_DELETION);
1462 	CU_ASSERT_EQUAL(cpl->status.sct, SPDK_NVME_SCT_GENERIC);
1463 }
1464 
1465 static void
1466 ut_disconnect_qpair_poll_group_cb(struct spdk_nvme_qpair *qpair, void *ctx)
1467 {
1468 	int *disconnected = ctx;
1469 
1470 	(*disconnected)++;
1471 }
1472 
1473 static void
1474 test_nvme_tcp_ctrlr_disconnect_qpair(void)
1475 {
1476 	struct spdk_nvme_ctrlr ctrlr = {};
1477 	struct spdk_nvme_qpair *qpair;
1478 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {};
1479 	struct nvme_tcp_qpair tqpair = {
1480 		.qpair = {
1481 			.trtype = SPDK_NVME_TRANSPORT_TCP,
1482 			.ctrlr = &ctrlr,
1483 			.async = true,
1484 		},
1485 		.recv_pdu = &recv_pdu,
1486 	};
1487 	struct spdk_nvme_poll_group group = {};
1488 	struct nvme_tcp_poll_group tgroup = { .group.group = &group };
1489 	struct nvme_request req = { .qpair = &tqpair.qpair, .cb_fn = ut_disconnect_qpair_req_cb };
1490 	struct nvme_tcp_req treq = { .req = &req, .tqpair = &tqpair };
1491 	int rc, disconnected;
1492 
1493 	qpair = &tqpair.qpair;
1494 	qpair->poll_group = &tgroup.group;
1495 	tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
1496 	tqpair.needs_poll = true;
1497 	TAILQ_INIT(&tgroup.needs_poll);
1498 	STAILQ_INIT(&tgroup.group.disconnected_qpairs);
1499 	TAILQ_INIT(&tqpair.send_queue);
1500 	TAILQ_INIT(&tqpair.free_reqs);
1501 	TAILQ_INIT(&tqpair.outstanding_reqs);
1502 	TAILQ_INSERT_TAIL(&tgroup.needs_poll, &tqpair, link);
1503 	TAILQ_INSERT_TAIL(&tqpair.send_queue, &pdu, tailq);
1504 
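	/* The first disconnect should remove the qpair from the poll list, close the
	 * socket, and drop any PDUs still queued for sending. */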
1505 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1506 
1507 	CU_ASSERT(tqpair.needs_poll == false);
1508 	CU_ASSERT(tqpair.sock == NULL);
1509 	CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue) == true);
1510 
1511 	/* Check that outstanding requests are aborted */
1512 	treq.state = NVME_TCP_REQ_ACTIVE;
1513 	qpair->num_outstanding_reqs = 1;
1514 	qpair->state = NVME_QPAIR_DISCONNECTING;
1515 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1516 
1517 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1518 
1519 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
1520 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 0);
1521 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.free_reqs));
1522 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1523 
1524 	/* Check that a request with an accel operation in progress won't be aborted until that
1525 	 * operation is completed */
1526 	treq.state = NVME_TCP_REQ_ACTIVE;
1527 	treq.ordering.bits.in_progress_accel = 1;
1528 	qpair->poll_group = NULL;
1529 	qpair->num_outstanding_reqs = 1;
1530 	qpair->state = NVME_QPAIR_DISCONNECTING;
1531 	TAILQ_REMOVE(&tqpair.free_reqs, &treq, link);
1532 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1533 
1534 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1535 
1536 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1537 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1538 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1539 
1540 	/* Check that a qpair will be transitioned to a DISCONNECTED state only once the accel
1541 	 * operation is completed */
1542 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1543 	CU_ASSERT_EQUAL(rc, 0);
1544 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1545 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1546 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1547 
1548 	treq.ordering.bits.in_progress_accel = 0;
1549 	qpair->num_outstanding_reqs = 0;
1550 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1551 
1552 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1553 	CU_ASSERT_EQUAL(rc, -ENXIO);
1554 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1555 
1556 	/* Check the same scenario but this time with spdk_sock_flush() returning errors */
1557 	treq.state = NVME_TCP_REQ_ACTIVE;
1558 	treq.ordering.bits.in_progress_accel = 1;
1559 	qpair->num_outstanding_reqs = 1;
1560 	qpair->state = NVME_QPAIR_DISCONNECTING;
1561 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1562 
1563 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1564 
1565 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1566 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1567 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1568 
1569 	MOCK_SET(spdk_sock_flush, -ENOTCONN);
1570 	treq.ordering.bits.in_progress_accel = 0;
1571 	qpair->num_outstanding_reqs = 0;
1572 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1573 
1574 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1575 	CU_ASSERT_EQUAL(rc, 0);
1576 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1577 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1578 	CU_ASSERT_EQUAL(rc, -ENXIO);
1579 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1580 	MOCK_CLEAR(spdk_sock_flush);
1581 
1582 	/* Now check the same scenario, but with a qpair that's part of a poll group */
1583 	disconnected = 0;
1584 	group.ctx = &disconnected;
1585 	treq.state = NVME_TCP_REQ_ACTIVE;
1586 	treq.ordering.bits.in_progress_accel = 1;
1587 	qpair->poll_group = &tgroup.group;
1588 	qpair->num_outstanding_reqs = 1;
1589 	qpair->state = NVME_QPAIR_DISCONNECTING;
1590 	STAILQ_INSERT_TAIL(&tgroup.group.disconnected_qpairs, qpair, poll_group_stailq);
1591 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1592 
1593 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1594 						ut_disconnect_qpair_poll_group_cb);
1595 	/* As long as there's an outstanding request, disconnect_cb shouldn't be executed */
1596 	CU_ASSERT_EQUAL(disconnected, 0);
1597 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1598 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1599 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1600 
1601 	treq.ordering.bits.in_progress_accel = 0;
1602 	qpair->num_outstanding_reqs = 0;
1603 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1604 
1605 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1606 						ut_disconnect_qpair_poll_group_cb);
1607 	CU_ASSERT_EQUAL(disconnected, 1);
1608 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1609 
1610 	/* Check that a non-async qpair is marked as disconnected immediately */
1611 	qpair->poll_group = NULL;
1612 	qpair->state = NVME_QPAIR_DISCONNECTING;
1613 	qpair->async = false;
1614 
1615 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1616 
1617 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1618 }
1619 
1620 static void
1621 test_nvme_tcp_ctrlr_create_io_qpair(void)
1622 {
1623 	struct spdk_nvme_qpair *qpair = NULL;
1624 	struct nvme_tcp_ctrlr tctrlr = {};
1625 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1626 	uint16_t qid = 1;
1627 	struct spdk_nvme_io_qpair_opts opts = {
1628 		.io_queue_size = 2,
1629 		.qprio = SPDK_NVME_QPRIO_URGENT,
1630 		.io_queue_requests = 1,
1631 	};
1632 	struct nvme_tcp_qpair *tqpair;
1633 
1634 	ctrlr->trid.priority = 1;
1635 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1636 	memset(ctrlr->opts.psk, 0, sizeof(ctrlr->opts.psk));
1637 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1638 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1639 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1640 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1641 
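	/* num_entries should end up as io_queue_size - 1, since per NVMe queue semantics a
	 * queue of size N can hold at most N - 1 outstanding commands. */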
1642 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1643 	tqpair = nvme_tcp_qpair(qpair);
1644 
1645 	CU_ASSERT(qpair != NULL);
1646 	CU_ASSERT(qpair->id == 1);
1647 	CU_ASSERT(qpair->ctrlr == ctrlr);
1648 	CU_ASSERT(qpair->qprio == SPDK_NVME_QPRIO_URGENT);
1649 	CU_ASSERT(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
1650 	CU_ASSERT(qpair->poll_group == (void *)0xDEADBEEF);
1651 	CU_ASSERT(tqpair->num_entries == 1);
1652 
1653 	free(tqpair->tcp_reqs);
1654 	spdk_free(tqpair->send_pdus);
1655 	free(tqpair);
1656 
1657 	/* Max queue size shall pass */
1658 	opts.io_queue_size = 0xffff;
1659 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1660 	tqpair = nvme_tcp_qpair(qpair);
1661 
1662 	CU_ASSERT(qpair != NULL);
1663 	CU_ASSERT(tqpair->num_entries == 0xfffe);
1664 
1665 	free(tqpair->tcp_reqs);
1666 	spdk_free(tqpair->send_pdus);
1667 	free(tqpair);
1668 
1669 	/* Queue size 0 shall fail */
1670 	opts.io_queue_size = 0;
1671 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1672 	CU_ASSERT(qpair == NULL);
1673 
1674 	/* Queue size 1 shall fail */
1675 	opts.io_queue_size = 1;
1676 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1677 	CU_ASSERT(qpair == NULL);
1678 }
1679 
1680 static void
1681 test_nvme_tcp_ctrlr_delete_io_qpair(void)
1682 {
1683 	struct spdk_nvme_ctrlr	ctrlr = {};
1684 	struct spdk_nvme_qpair *qpair;
1685 	struct nvme_tcp_qpair *tqpair;
1686 	struct nvme_tcp_req tcp_req = {};
1687 	struct nvme_request	req = {};
1688 	int rc;
1689 
1690 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
1691 	tqpair->tcp_reqs = calloc(1, sizeof(struct nvme_tcp_req));
1692 	tqpair->send_pdus = calloc(1, sizeof(struct nvme_tcp_pdu));
1693 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1694 	qpair = &tqpair->qpair;
1695 	qpair->ctrlr = &ctrlr;
1696 	tcp_req.req = &req;
1697 	tcp_req.req->qpair = &tqpair->qpair;
1698 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1699 	tcp_req.tqpair = tqpair;
1700 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1701 	TAILQ_INIT(&tqpair->outstanding_reqs);
1702 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1703 	qpair->num_outstanding_reqs = 1;
1704 
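	/* Deleting the qpair is expected to abort the outstanding request, completing it
	 * through ut_nvme_complete_request, before the qpair resources are freed. */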
1705 	rc = nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1706 
1707 	CU_ASSERT(rc == 0);
1708 }
1709 
1710 static void
1711 test_nvme_tcp_poll_group_get_stats(void)
1712 {
1713 	int rc = 0;
1714 	struct spdk_sock_group sgroup = {};
1715 	struct nvme_tcp_poll_group *pgroup = NULL;
1716 	struct spdk_nvme_transport_poll_group *tgroup = NULL;
1717 	struct spdk_nvme_transport_poll_group_stat *tgroup_stat = NULL;
1718 
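	/* Mock spdk_sock_group_create() so poll group creation succeeds without a real
	 * socket group. */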
1719 	MOCK_SET(spdk_sock_group_create, &sgroup);
1720 	tgroup = nvme_tcp_poll_group_create();
1721 	CU_ASSERT(tgroup != NULL);
1722 	pgroup = nvme_tcp_poll_group(tgroup);
1723 	CU_ASSERT(pgroup != NULL);
1724 
1725 	/* Invalid group pointer, expect failure and -EINVAL */
1726 	rc = nvme_tcp_poll_group_get_stats(NULL, &tgroup_stat);
1727 	CU_ASSERT(rc == -EINVAL);
1728 	CU_ASSERT(tgroup_stat == NULL);
1729 
1730 	/* Invalid stats pointer, expect failure and -EINVAL */
1731 	rc = nvme_tcp_poll_group_get_stats(tgroup, NULL);
1732 	CU_ASSERT(rc == -EINVAL);
1733 
1734 	/* Getting stats succeeds */
1735 	rc = nvme_tcp_poll_group_get_stats(tgroup, &tgroup_stat);
1736 	CU_ASSERT(rc == 0);
1737 	CU_ASSERT(tgroup_stat != NULL);
1738 	CU_ASSERT(tgroup_stat->trtype == SPDK_NVME_TRANSPORT_TCP);
1739 	CU_ASSERT(memcmp(&tgroup_stat->tcp, &pgroup->stats, sizeof(struct spdk_nvme_tcp_stat)) == 0);
1740 
1741 	nvme_tcp_poll_group_free_stats(tgroup, tgroup_stat);
1742 	rc = nvme_tcp_poll_group_destroy(tgroup);
1743 	CU_ASSERT(rc == 0);
1744 
1745 	MOCK_CLEAR(spdk_sock_group_create);
1746 }
1747 
1748 static void
1749 test_nvme_tcp_ctrlr_construct(void)
1750 {
1751 	struct nvme_tcp_qpair *tqpair = NULL;
1752 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1753 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1754 	struct spdk_nvme_transport_id trid = {
1755 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1756 		.priority = 1,
1757 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1758 		.traddr = "192.168.1.78",
1759 		.trsvcid = "23",
1760 	};
1761 	struct spdk_nvme_ctrlr_opts opts = {
1762 		.admin_queue_size = 2,
1763 		.src_addr = "192.168.1.77",
1764 		.src_svcid = "23",
1765 	};
1766 
1767 	/* Transmit ACK timeout value exceeds the max, expected to pass with the value capped at the max */
1768 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1769 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDEADBEEF);
1770 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1771 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1772 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1773 
1774 	CU_ASSERT(ctrlr != NULL);
1775 	CU_ASSERT(tctrlr != NULL);
1776 	CU_ASSERT(tqpair != NULL);
1777 	CU_ASSERT(ctrlr->opts.transport_ack_timeout == NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
1778 	CU_ASSERT(memcmp(&ctrlr->trid, &trid, sizeof(struct spdk_nvme_transport_id)) == 0);
1779 	CU_ASSERT(tqpair->num_entries == 1);
1780 	CU_ASSERT(TAILQ_EMPTY(&tqpair->send_queue));
1781 	CU_ASSERT(TAILQ_EMPTY(&tqpair->outstanding_reqs));
1782 	CU_ASSERT(!TAILQ_EMPTY(&tqpair->free_reqs));
1783 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs) == &tqpair->tcp_reqs[0]);
1784 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->cid == 0);
1785 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->tqpair == tqpair);
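	/* nvme_tcp_alloc_reqs() allocates num_entries + 2 PDUs: one per request plus the
	 * qpair's dedicated send_pdu and recv_pdu at the end of the array. */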
1786 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->pdu == &tqpair->send_pdus[0]);
1787 	CU_ASSERT(tqpair->send_pdu == &tqpair->send_pdus[1]);
1788 	CU_ASSERT(tqpair->recv_pdu == &tqpair->send_pdus[2]);
1789 
1790 	free(tqpair->tcp_reqs);
1791 	spdk_free(tqpair->send_pdus);
1792 	free(tqpair);
1793 	free(tctrlr);
1794 
1795 	/* Admin queue size is less than the minimum required size, admin qpair creation is expected to fail */
1796 	opts.admin_queue_size = 1;
1797 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1798 	CU_ASSERT(ctrlr == NULL);
1799 
1800 	/* Unhandled ADRFAM, admin qpair creation is expected to fail */
1801 	opts.admin_queue_size = 2;
1802 	trid.adrfam = SPDK_NVMF_ADRFAM_INTRA_HOST;
1803 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1804 	CU_ASSERT(ctrlr == NULL);
1805 
1806 	/* Socket connection error, admin qpair creation is expected to fail */
1807 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1808 	MOCK_SET(spdk_sock_connect_ext, NULL);
1809 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1810 	CU_ASSERT(ctrlr == NULL);
1811 
1812 	MOCK_CLEAR(spdk_sock_connect_ext);
1813 }
1814 
1815 static void
1816 test_nvme_tcp_qpair_submit_request(void)
1817 {
1818 	int rc = 0;
1819 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1820 	struct nvme_tcp_qpair *tqpair = NULL;
1821 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1822 	struct nvme_tcp_req *tcp_req = NULL;
1823 	struct nvme_request req = {};
1824 	struct nvme_tcp_ut_bdev_io bio = {};
1825 	struct spdk_nvme_tcp_stat stat = {};
1826 	struct spdk_nvme_transport_id trid = {
1827 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1828 		.priority = 1,
1829 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1830 		.traddr = "192.168.1.78",
1831 		.trsvcid = "23",
1832 	};
1833 	struct spdk_nvme_ctrlr_opts opts = {
1834 		.admin_queue_size = 2,
1835 		.src_addr = "192.168.1.77",
1836 		.src_svcid = "23",
1837 	};
1838 
1839 	/* Construct TCP Controller */
1840 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1841 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDCADBEEF);
1842 
1843 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1844 	CU_ASSERT(ctrlr != NULL);
1845 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1846 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1847 	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
1848 	CU_ASSERT(tctrlr != NULL);
1849 	CU_ASSERT(tqpair != NULL);
1850 	CU_ASSERT(tcp_req->pdu != NULL);
1851 	CU_ASSERT(tqpair->num_entries == 1);
1852 
1853 	tqpair->stats = &stat;
1854 	req.qpair = &tqpair->qpair;
1855 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
1856 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
1857 
1858 	/* Request construction fails because max_sges is too small for the payload */
1859 	req.qpair->ctrlr->max_sges = 1;
1860 	req.payload_size = 2048;
1861 	req.payload_offset = 0;
1862 	bio.iovpos = 0;
1863 	bio.iovs[0].iov_len = 1024;
1864 	bio.iovs[1].iov_len = 1024;
1865 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
1866 	bio.iovs[1].iov_base = (void *)0xDFADBEEF;
1867 
1868 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1869 	CU_ASSERT(rc == -1);
1870 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->free_reqs));
1871 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_FREE);
1872 
1873 	/* Multiple SGL entries, expected to pass */
1874 	req.qpair->ctrlr->max_sges = 2;
1875 
1876 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1877 	CU_ASSERT(rc == 0);
1878 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_ACTIVE);
1879 	CU_ASSERT(NULL == TAILQ_FIRST(&tqpair->free_reqs));
1880 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->outstanding_reqs));
1881 	CU_ASSERT(tcp_req->expected_datao == 0);
1882 	CU_ASSERT(tcp_req->req == &req);
1883 	CU_ASSERT(tcp_req->r2tl_remain == 0);
1884 	CU_ASSERT(tcp_req->r2tl_remain_next == 0);
1885 	CU_ASSERT(tcp_req->active_r2ts == 0);
1886 	CU_ASSERT(tcp_req->iovcnt == 2);
1887 	CU_ASSERT(tcp_req->ordering.raw == 0);
1888 	CU_ASSERT(req.cmd.cid == tcp_req->cid);
1889 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
1890 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
1891 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
1892 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
1893 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
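	/* The 2048-byte host-to-controller payload fits within the in-capsule data limit,
	 * so it is expected to be carried inline right after the command header. */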
1894 	CU_ASSERT(tcp_req->in_capsule_data == true);
1895 	CU_ASSERT(tcp_req->iov[0].iov_len == bio.iovs[0].iov_len);
1896 	CU_ASSERT(tcp_req->iov[1].iov_len == bio.iovs[1].iov_len);
1897 	CU_ASSERT(tcp_req->iov[0].iov_base == bio.iovs[0].iov_base);
1898 	CU_ASSERT(tcp_req->iov[1].iov_base == bio.iovs[1].iov_base);
1899 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
1900 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) == 0);
1901 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_DDGSTF) == 0);
1902 	CU_ASSERT(tcp_req->datao == 0);
1903 	CU_ASSERT(tcp_req->pdu->data_len == req.payload_size);
1904 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdo == sizeof(struct spdk_nvme_tcp_cmd));
1905 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.plen == sizeof(struct spdk_nvme_tcp_cmd) +
1906 		  req.payload_size);
1907 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_base == (void *)0xDEADBEEF);
1908 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_len == 1024);
1909 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_base == (void *)0xDFADBEEF);
1910 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_len == 1024);
1911 	CU_ASSERT(tcp_req->pdu->data_iovcnt == 2);
1912 
1913 	/* Request resource limit reached, expected to return -EAGAIN */
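	/* The only tcp_req is still outstanding from the previous submission, so this
	 * request should be counted as queued and -EAGAIN returned. */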
1914 	memset(&req, 0x00, sizeof(struct nvme_request));
1915 	CU_ASSERT(tqpair->stats->queued_requests == 0);
1916 
1917 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1918 	CU_ASSERT(rc == -EAGAIN);
1919 	CU_ASSERT(tqpair->stats->queued_requests == 1);
1920 
1921 	MOCK_CLEAR(spdk_sock_connect_ext);
1922 	free(tqpair->tcp_reqs);
1923 	spdk_free(tqpair->send_pdus);
1924 	free(tqpair);
1925 	free(tctrlr);
1926 }
1927 
1928 int
1929 main(int argc, char **argv)
1930 {
1931 	CU_pSuite	suite = NULL;
1932 	unsigned int	num_failures;
1933 
1934 	CU_initialize_registry();
1935 
1936 	suite = CU_add_suite("nvme_tcp", NULL, NULL);
1937 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
1938 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
1939 	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
1940 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
1941 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
1942 	CU_ADD_TEST(suite, test_nvme_tcp_req_complete_safe);
1943 	CU_ADD_TEST(suite, test_nvme_tcp_req_get);
1944 	CU_ADD_TEST(suite, test_nvme_tcp_req_init);
1945 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_capsule_cmd_send);
1946 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_write_pdu);
1947 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_set_recv_state);
1948 	CU_ADD_TEST(suite, test_nvme_tcp_alloc_reqs);
1949 	CU_ADD_TEST(suite, test_nvme_tcp_parse_addr);
1950 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_send_h2c_term_req);
1951 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_ch_handle);
1952 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_connect_sock);
1953 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_icreq_send);
1954 	CU_ADD_TEST(suite, test_nvme_tcp_c2h_payload_handle);
1955 	CU_ADD_TEST(suite, test_nvme_tcp_icresp_handle);
1956 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_payload_handle);
1957 	CU_ADD_TEST(suite, test_nvme_tcp_capsule_resp_hdr_handle);
1958 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_connect_qpair);
1959 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_disconnect_qpair);
1960 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_create_io_qpair);
1961 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_delete_io_qpair);
1962 	CU_ADD_TEST(suite, test_nvme_tcp_poll_group_get_stats);
1963 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_construct);
1964 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_submit_request);
1965 
1966 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1967 	CU_cleanup_registry();
1968 	return num_failures;
1969 }
1970