xref: /spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c (revision 14e26b9d0410a98689caffcba7bfacac8d85c74d)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk/nvme.h"
9 
10 #include "spdk_internal/cunit.h"
11 
12 #include "common/lib/test_sock.c"
13 #include "nvme/nvme_internal.h"
14 #include "common/lib/nvme/common_stubs.h"
15 
16 /* nvme_transport_ctrlr_disconnect_qpair_done() stub is defined in common_stubs.h, but we need to
17  * override it here */
18 static void nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair);
19 #define nvme_transport_ctrlr_disconnect_qpair_done nvme_transport_ctrlr_disconnect_qpair_done_mocked
20 
21 #include "nvme/nvme_tcp.c"
22 
23 SPDK_LOG_REGISTER_COMPONENT(nvme)
24 
25 DEFINE_STUB(nvme_qpair_submit_request,
26 	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
27 
28 DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
29 		struct spdk_nvme_qpair *qpair), 0);
30 DEFINE_STUB(spdk_sock_get_optimal_sock_group,
31 	    int,
32 	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
33 	    0);
34 
35 DEFINE_STUB(spdk_sock_group_get_ctx,
36 	    void *,
37 	    (struct spdk_sock_group *group),
38 	    NULL);
39 
40 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
41 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
42 
43 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
44 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
45 
46 DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
47 		struct spdk_nvme_cmd *cmd));
48 
49 DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
50 		struct spdk_nvme_cpl *cpl));
51 
52 DEFINE_STUB(spdk_memory_domain_get_system_domain, struct spdk_memory_domain *, (void), NULL);
53 DEFINE_STUB(spdk_memory_domain_translate_data, int,
54 	    (struct spdk_memory_domain *src_domain, void *src_domain_ctx,
55 	     struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
56 	     void *addr, size_t len, struct spdk_memory_domain_translation_result *result), 0);
57 DEFINE_STUB_V(spdk_memory_domain_invalidate_data, (struct spdk_memory_domain *domain,
58 		void *domain_ctx, struct iovec *iov, uint32_t iovcnt));
59 
60 static void
61 nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair)
62 {
63 	qpair->state = NVME_QPAIR_DISCONNECTED;
64 }
65 
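/* nvme_tcp_pdu_set_data_buf() maps the byte range [data_offset, data_offset + data_len)
 * of the caller's SGL (iov array) onto pdu->data_iov[]/data_iovcnt, offsetting into and
 * splitting entries as needed.  For example, offset 1024/length 512 inside a single
 * 4096-byte entry yields one PDU iovec at base + 1024 with length 512, which is what the
 * first case below checks.  When pdu->dif_ctx is set, the range is additionally widened
 * to cover interleaved metadata (see test_nvme_tcp_pdu_set_data_buf_with_md).
 */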
66 static void
67 test_nvme_tcp_pdu_set_data_buf(void)
68 {
69 	struct nvme_tcp_pdu pdu = {};
70 	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
71 	uint32_t data_len;
72 	uint64_t i;
73 
74 	/* 1st case: input is a single SGL entry. */
75 	iov[0].iov_base = (void *)0xDEADBEEF;
76 	iov[0].iov_len = 4096;
77 
78 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
79 
80 	CU_ASSERT(pdu.data_iovcnt == 1);
81 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
82 	CU_ASSERT(pdu.data_iov[0].iov_len == 512);
83 
84 	/* 2nd case: simulate split on multiple SGL entries. */
85 	iov[0].iov_base = (void *)0xDEADBEEF;
86 	iov[0].iov_len = 4096;
87 	iov[1].iov_base = (void *)0xFEEDBEEF;
88 	iov[1].iov_len = 512 * 7;
89 	iov[2].iov_base = (void *)0xF00DF00D;
90 	iov[2].iov_len = 4096 * 2;
91 
92 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
93 
94 	CU_ASSERT(pdu.data_iovcnt == 1);
95 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
96 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
97 
98 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
99 
100 	CU_ASSERT(pdu.data_iovcnt == 2);
101 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
102 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
103 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
104 	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
105 
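	/* Third sub-range: data_offset 4096 + 512 * 3 consumes all of iov[0] and the first
	 * 512 * 3 bytes of iov[1], so the requested 512 * 4 + 4096 * 2 bytes map to the tail
	 * of iov[1] plus the whole of iov[2], i.e. two PDU data iovecs.
	 */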
106 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
107 
108 	CU_ASSERT(pdu.data_iovcnt == 2);
109 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
110 	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
111 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
112 	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
113 
114 	/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
115 	 * entries.
116 	 */
117 	data_len = 0;
118 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
119 		iov[i].iov_base = (void *)(0xDEADBEEF + i);
120 		iov[i].iov_len = 512 * (i + 1);
121 		data_len += 512 * (i + 1);
122 	}
123 
124 	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
125 
126 	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
127 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
128 		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
129 		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
130 	}
131 }
132 
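/* nvme_tcp_build_iovs() flattens a PDU into an iovec array suitable for a socket writev:
 * iov[0] covers the raw header (plus the 4-byte header digest when hdgst is enabled),
 * followed by the PDU's data_iov entries, and finally the 4-byte data digest when ddgst
 * is enabled.  mapped_length reports the total number of bytes described by the iovecs.
 */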
133 static void
134 test_nvme_tcp_build_iovs(void)
135 {
136 	const uintptr_t pdu_iov_len = 4096;
137 	struct nvme_tcp_pdu pdu = {};
138 	struct iovec iovs[5] = {};
139 	uint32_t mapped_length = 0;
140 	int rc;
141 
142 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
143 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
144 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
145 			      SPDK_NVME_TCP_DIGEST_LEN;
146 	pdu.data_len = pdu_iov_len * 2;
147 	pdu.padding_len = 0;
148 
149 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
150 	pdu.data_iov[0].iov_len = pdu_iov_len;
151 	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
152 	pdu.data_iov[1].iov_len = pdu_iov_len;
153 	pdu.data_iovcnt = 2;
154 
155 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
156 	CU_ASSERT(rc == 4);
157 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
158 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
159 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
160 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
161 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
162 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
163 	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
164 	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
165 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
166 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
167 
168 	/* Add a new data_iov entry, update pdu iov count and data length */
169 	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
170 	pdu.data_iov[2].iov_len = 123;
171 	pdu.data_iovcnt = 3;
172 	pdu.data_len += 123;
173 	pdu.hdr.common.plen += 123;
174 
175 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
176 	CU_ASSERT(rc == 5);
177 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
178 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
179 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
180 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
181 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
182 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
183 	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
184 	CU_ASSERT(iovs[3].iov_len == 123);
185 	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
186 	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
187 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
188 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
189 }
190 
191 struct nvme_tcp_ut_bdev_io {
192 	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
193 	int iovpos;
194 };
195 
196 /* Essentially a simplified version of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
197 static void
198 nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
199 {
200 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
201 	struct iovec *iov;
202 
203 	for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
204 		iov = &bio->iovs[bio->iovpos];
205 		/* The offset must fall exactly on an SGL entry boundary */
206 		if (offset == 0) {
207 			break;
208 		}
209 
210 		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
211 		offset -= iov->iov_len;
212 	}
213 
214 	SPDK_CU_ASSERT_FATAL(offset == 0);
215 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
216 }
217 
218 static int
219 nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
220 {
221 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
222 	struct iovec *iov;
223 
224 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
225 
226 	iov = &bio->iovs[bio->iovpos];
227 
228 	*address = iov->iov_base;
229 	*length = iov->iov_len;
230 	bio->iovpos++;
231 
232 	return 0;
233 }
234 
235 static void
236 test_nvme_tcp_build_sgl_request(void)
237 {
238 	struct nvme_tcp_qpair tqpair;
239 	struct spdk_nvme_ctrlr ctrlr = {{0}};
240 	struct nvme_tcp_req tcp_req = {0};
241 	struct nvme_request req = {{0}};
242 	struct nvme_tcp_ut_bdev_io bio;
243 	uint64_t i;
244 	int rc;
245 
246 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
247 	tqpair.qpair.ctrlr = &ctrlr;
248 	tcp_req.req = &req;
249 
250 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
251 	req.qpair = &tqpair.qpair;
252 
253 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
254 		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
255 		bio.iovs[i].iov_len = 0;
256 	}
257 
258 	/* Test case 1: Single SGL. Expected: PASS */
259 	bio.iovpos = 0;
260 	req.payload_offset = 0;
261 	req.payload_size = 0x1000;
262 	bio.iovs[0].iov_len = 0x1000;
263 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
264 	SPDK_CU_ASSERT_FATAL(rc == 0);
265 	CU_ASSERT(bio.iovpos == 1);
266 	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
267 	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
268 	CU_ASSERT(tcp_req.iovcnt == 1);
269 
270 	/* Test case 2: Multiple SGL. Expected: PASS */
271 	bio.iovpos = 0;
272 	req.payload_offset = 0;
273 	req.payload_size = 0x4000;
274 	for (i = 0; i < 4; i++) {
275 		bio.iovs[i].iov_len = 0x1000;
276 	}
277 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
278 	SPDK_CU_ASSERT_FATAL(rc == 0);
279 	CU_ASSERT(bio.iovpos == 4);
280 	CU_ASSERT(tcp_req.iovcnt == 4);
281 	for (i = 0; i < 4; i++) {
282 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
283 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
284 	}
285 
286 	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
287 	bio.iovpos = 0;
288 	req.payload_offset = 0;
289 	req.payload_size = 0x17000;
290 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
291 		bio.iovs[i].iov_len = 0x1000;
292 	}
293 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
294 	SPDK_CU_ASSERT_FATAL(rc != 0);
295 	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
296 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
297 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
298 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
299 	}
300 }
301 
302 static void
303 test_nvme_tcp_pdu_set_data_buf_with_md(void)
304 {
305 	struct nvme_tcp_pdu pdu = {};
306 	struct iovec iovs[7] = {};
307 	struct spdk_dif_ctx dif_ctx = {};
308 	int rc;
309 	struct spdk_dif_ctx_init_ext_opts dif_opts;
310 
311 	pdu.dif_ctx = &dif_ctx;
312 
313 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
314 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
315 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
316 			       0, 0, 0, 0, 0, &dif_opts);
317 	CU_ASSERT(rc == 0);
318 
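	/* The DIF context describes 520-byte extended blocks: 512 bytes of data followed by
	 * 8 bytes of interleaved metadata.  Offsets and lengths passed to
	 * nvme_tcp_pdu_set_data_buf() are logical data bytes; the mapped buffer range grows
	 * by 8 bytes for every data block that completes inside the range.  E.g. the range
	 * [500, 1500) below crosses block boundaries at 512 and 1024, so 1000 data bytes map
	 * to 1016 buffer bytes.
	 */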
319 	/* Single iovec case */
320 	iovs[0].iov_base = (void *)0xDEADBEEF;
321 	iovs[0].iov_len = 2080;
322 
323 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
324 
325 	CU_ASSERT(dif_ctx.data_offset == 0);
326 	CU_ASSERT(pdu.data_len == 500);
327 	CU_ASSERT(pdu.data_iovcnt == 1);
328 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
329 	CU_ASSERT(pdu.data_iov[0].iov_len == 500);
330 
331 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
332 
333 	CU_ASSERT(dif_ctx.data_offset == 500);
334 	CU_ASSERT(pdu.data_len == 1000);
335 	CU_ASSERT(pdu.data_iovcnt == 1);
336 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
337 	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
338 
339 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
340 
341 	CU_ASSERT(dif_ctx.data_offset == 1500);
342 	CU_ASSERT(pdu.data_len == 548);
343 	CU_ASSERT(pdu.data_iovcnt == 1);
344 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
345 	CU_ASSERT(pdu.data_iov[0].iov_len == 564);
346 
347 	/* Multiple iovecs case */
348 	iovs[0].iov_base = (void *)0xDEADBEEF;
349 	iovs[0].iov_len = 256;
350 	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
351 	iovs[1].iov_len = 256 + 1;
352 	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
353 	iovs[2].iov_len = 4;
354 	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
355 	iovs[3].iov_len = 3 + 123;
356 	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
357 	iovs[4].iov_len = 389 + 6;
358 	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
359 	iovs[5].iov_len = 2 + 512 + 8 + 432;
360 	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
361 	iovs[6].iov_len = 80 + 8;
362 
363 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
364 
365 	CU_ASSERT(dif_ctx.data_offset == 0);
366 	CU_ASSERT(pdu.data_len == 500);
367 	CU_ASSERT(pdu.data_iovcnt == 2);
368 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
369 	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
370 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
371 	CU_ASSERT(pdu.data_iov[1].iov_len == 244);
372 
373 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
374 
375 	CU_ASSERT(dif_ctx.data_offset == 500);
376 	CU_ASSERT(pdu.data_len == 1000);
377 	CU_ASSERT(pdu.data_iovcnt == 5);
378 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
379 	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
380 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
381 	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
382 	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
383 	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
384 	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
385 	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
386 	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
387 	CU_ASSERT(pdu.data_iov[4].iov_len == 478);
388 
389 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
390 
391 	CU_ASSERT(dif_ctx.data_offset == 1500);
392 	CU_ASSERT(pdu.data_len == 548);
393 	CU_ASSERT(pdu.data_iovcnt == 2);
394 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
395 	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
396 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
397 	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
398 }
399 
400 static void
401 test_nvme_tcp_build_iovs_with_md(void)
402 {
403 	struct nvme_tcp_pdu pdu = {};
404 	struct iovec iovs[11] = {};
405 	struct spdk_dif_ctx dif_ctx = {};
406 	uint32_t mapped_length = 0;
407 	int rc;
408 	struct spdk_dif_ctx_init_ext_opts dif_opts;
409 
410 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
411 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
412 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
413 			       0, 0, 0, 0, 0, &dif_opts);
414 	CU_ASSERT(rc == 0);
415 
416 	pdu.dif_ctx = &dif_ctx;
417 
418 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
419 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
420 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
421 			      SPDK_NVME_TCP_DIGEST_LEN;
422 	pdu.data_len = 512 * 8;
423 	pdu.padding_len = 0;
424 
425 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
426 	pdu.data_iov[0].iov_len = (512 + 8) * 8;
427 	pdu.data_iovcnt = 1;
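	/* The single (512 + 8) * 8 byte buffer holds eight 520-byte extended blocks.  With
	 * pdu.dif_ctx set, nvme_tcp_build_iovs() is expected to emit one 512-byte data iovec
	 * per block at 520-byte strides, skipping the metadata bytes, plus the header and
	 * data-digest entries, 10 iovecs in total.
	 */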
428 
429 	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
430 	CU_ASSERT(rc == 10);
431 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
432 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
433 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
434 	CU_ASSERT(iovs[1].iov_len == 512);
435 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
436 	CU_ASSERT(iovs[2].iov_len == 512);
437 	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
438 	CU_ASSERT(iovs[3].iov_len == 512);
439 	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
440 	CU_ASSERT(iovs[4].iov_len == 512);
441 	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
442 	CU_ASSERT(iovs[5].iov_len == 512);
443 	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
444 	CU_ASSERT(iovs[6].iov_len == 512);
445 	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
446 	CU_ASSERT(iovs[7].iov_len == 512);
447 	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
448 	CU_ASSERT(iovs[8].iov_len == 512);
449 	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
450 	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
451 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
452 		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
453 }
454 
455 /* Just a stub, nothing to do */
456 static void
457 ut_nvme_complete_request(void *arg, const struct spdk_nvme_cpl *cpl)
458 {
459 	return;
460 }
461 
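/* nvme_tcp_req_complete_safe() completes a request only once it is still ACTIVE and both
 * ordering bits are set: send_ack (the capsule send completed) and data_recv (the response
 * or data arrived).  Inside completion context the callback runs immediately; otherwise the
 * request is marked for async completion via tqpair->async_complete, which the cases below
 * exercise.
 */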
462 static void
463 test_nvme_tcp_req_complete_safe(void)
464 {
465 	bool rc;
466 	struct nvme_tcp_req	tcp_req = {0};
467 	struct nvme_request	req = {{0}};
468 	struct nvme_tcp_qpair	tqpair = {{0}};
469 
470 	tcp_req.req = &req;
471 	tcp_req.req->qpair = &tqpair.qpair;
472 	tcp_req.req->cb_fn = ut_nvme_complete_request;
473 	tcp_req.tqpair = &tqpair;
474 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
475 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
476 	tqpair.qpair.num_outstanding_reqs = 1;
477 
478 	/* Test case 1: send operation and transfer completed. Expect: PASS */
479 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
480 	tcp_req.ordering.bits.send_ack = 1;
481 	tcp_req.ordering.bits.data_recv = 1;
482 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
483 
484 	rc = nvme_tcp_req_complete_safe(&tcp_req);
485 	CU_ASSERT(rc == true);
486 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
487 
488 	/* Test case 2: send operation not completed. Expect: FAIL */
489 	tcp_req.ordering.raw = 0;
490 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
491 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
492 	tqpair.qpair.num_outstanding_reqs = 1;
493 
494 	rc = nvme_tcp_req_complete_safe(&tcp_req);
495 	SPDK_CU_ASSERT_FATAL(rc != true);
496 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
497 	TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
498 
499 	/* Test case 3: in completion context. Expect: PASS */
500 	tqpair.qpair.in_completion_context = 1;
501 	tqpair.async_complete = 0;
502 	tcp_req.ordering.bits.send_ack = 1;
503 	tcp_req.ordering.bits.data_recv = 1;
504 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
505 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
506 	tqpair.qpair.num_outstanding_reqs = 1;
507 
508 	rc = nvme_tcp_req_complete_safe(&tcp_req);
509 	CU_ASSERT(rc == true);
510 	CU_ASSERT(tcp_req.tqpair->async_complete == 0);
511 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
512 
513 	/* Test case 4: in async complete. Expect: PASS */
514 	tqpair.qpair.in_completion_context = 0;
515 	tcp_req.ordering.bits.send_ack = 1;
516 	tcp_req.ordering.bits.data_recv = 1;
517 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
518 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
519 	tqpair.qpair.num_outstanding_reqs = 1;
520 
521 	rc = nvme_tcp_req_complete_safe(&tcp_req);
522 	CU_ASSERT(rc == true);
523 	CU_ASSERT(tcp_req.tqpair->async_complete);
524 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
525 }
526 
527 static void
528 test_nvme_tcp_req_init(void)
529 {
530 	struct nvme_tcp_qpair tqpair = {};
531 	struct nvme_request req = {};
532 	struct nvme_tcp_req tcp_req = {0};
533 	struct spdk_nvme_ctrlr ctrlr = {{0}};
534 	struct nvme_tcp_ut_bdev_io bio = {};
535 	int rc;
536 
537 	tqpair.qpair.ctrlr = &ctrlr;
538 	req.qpair = &tqpair.qpair;
539 
540 	tcp_req.cid = 1;
541 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
542 	req.payload_offset = 0;
543 	req.payload_size = 4096;
544 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
545 	ctrlr.ioccsz_bytes = 1024;
546 	bio.iovpos = 0;
547 	bio.iovs[0].iov_len = 8192;
548 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
549 
550 	/* Test case 1: payload type SGL. Expect: PASS */
551 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
552 	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
553 
554 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
555 	CU_ASSERT(rc == 0);
556 	CU_ASSERT(tcp_req.req == &req);
557 	CU_ASSERT(tcp_req.in_capsule_data == true);
558 	CU_ASSERT(tcp_req.iovcnt == 1);
559 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
560 	CU_ASSERT(tcp_req.iov[0].iov_base == bio.iovs[0].iov_base);
561 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
562 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
563 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
564 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
565 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
566 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
567 
568 	/* Test case 2: payload type CONTIG. Expect: PASS */
569 	memset(&req.cmd, 0, sizeof(req.cmd));
570 	memset(&tcp_req, 0, sizeof(tcp_req));
571 	tcp_req.cid = 1;
572 	req.payload = NVME_PAYLOAD_CONTIG(&bio, NULL);
573 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
574 
575 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
576 	CU_ASSERT(rc == 0);
577 	CU_ASSERT(tcp_req.req == &req);
578 	CU_ASSERT(tcp_req.in_capsule_data == true);
579 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
580 	CU_ASSERT(tcp_req.iov[0].iov_base == &bio);
581 	CU_ASSERT(tcp_req.iovcnt == 1);
582 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
583 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
584 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
585 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
586 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
587 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
588 
589 }
590 
591 static void
592 test_nvme_tcp_req_get(void)
593 {
594 	struct nvme_tcp_req tcp_req = {0};
595 	struct nvme_tcp_qpair tqpair = {};
596 	struct nvme_tcp_pdu send_pdu = {};
597 
598 	tcp_req.pdu = &send_pdu;
599 	tcp_req.state = NVME_TCP_REQ_FREE;
600 
601 	TAILQ_INIT(&tqpair.free_reqs);
602 	TAILQ_INIT(&tqpair.outstanding_reqs);
603 	TAILQ_INSERT_HEAD(&tqpair.free_reqs, &tcp_req, link);
604 
605 	CU_ASSERT(nvme_tcp_req_get(&tqpair) == &tcp_req);
606 	CU_ASSERT(tcp_req.state == NVME_TCP_REQ_ACTIVE);
607 	CU_ASSERT(tcp_req.datao == 0);
608 	CU_ASSERT(tcp_req.req == NULL);
609 	CU_ASSERT(tcp_req.in_capsule_data == false);
610 	CU_ASSERT(tcp_req.r2tl_remain == 0);
611 	CU_ASSERT(tcp_req.iovcnt == 0);
612 	CU_ASSERT(tcp_req.ordering.raw == 0);
613 	/* outstanding_reqs should still be empty - caller is responsible
614 	 * for putting it on the TAILQ after any other initialization is
615 	 * completed.
616 	 */
617 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
618 	CU_ASSERT(TAILQ_EMPTY(&tqpair.free_reqs));
619 
620 	/* No tcp request available, expect fail */
621 	SPDK_CU_ASSERT_FATAL(nvme_tcp_req_get(&tqpair) == NULL);
622 }
623 
624 static void
625 test_nvme_tcp_qpair_capsule_cmd_send(void)
626 {
627 	struct nvme_tcp_qpair tqpair = {};
628 	struct spdk_nvme_tcp_stat stats = {};
629 	struct nvme_tcp_req tcp_req = {};
630 	struct nvme_tcp_pdu pdu = {};
631 	struct nvme_request req = {};
632 	char iov_base0[4096];
633 	char iov_base1[4096];
634 	uint32_t plen;
635 	uint8_t pdo;
636 
637 	memset(iov_base0, 0xFF, 4096);
638 	memset(iov_base1, 0xFF, 4096);
639 	tcp_req.req = &req;
640 	tcp_req.pdu = &pdu;
641 	TAILQ_INIT(&tqpair.send_queue);
642 	tqpair.stats = &stats;
643 
644 	tcp_req.iov[0].iov_base = (void *)iov_base0;
645 	tcp_req.iov[0].iov_len = 4096;
646 	tcp_req.iov[1].iov_base = (void *)iov_base1;
647 	tcp_req.iov[1].iov_len = 4096;
648 	tcp_req.iovcnt = 2;
649 	tcp_req.req->payload_size = 8192;
650 	tcp_req.in_capsule_data = true;
651 	tqpair.cpda = NVME_TCP_HPDA_DEFAULT;
652 
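	/* For a capsule command, plen is the full PDU length: common header (+ header digest
	 * when hdgst is enabled), optional padding, in-capsule data and the trailing data
	 * digest when ddgst is enabled.  pdo is the byte offset at which the data starts; with
	 * a non-zero CPDA (case 3) the data is padded out to a (cpda + 1) * 4 byte boundary.
	 */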
653 	/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
654 	tqpair.flags.host_hdgst_enable = 1;
655 	tqpair.flags.host_ddgst_enable = 1;
656 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd) +
657 		     SPDK_NVME_TCP_DIGEST_LEN;
658 	plen += tcp_req.req->payload_size;
659 	plen += SPDK_NVME_TCP_DIGEST_LEN;
660 
661 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
662 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
663 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
664 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
665 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
666 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
667 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
668 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
669 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
670 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
671 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
672 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
673 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
674 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
675 
676 	/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
677 	memset(&pdu, 0, sizeof(pdu));
678 	tqpair.flags.host_hdgst_enable = 0;
679 	tqpair.flags.host_ddgst_enable = 0;
680 
681 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd);
682 	plen += tcp_req.req->payload_size;
683 
684 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
685 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
686 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags == 0);
687 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
688 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
689 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
690 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
691 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
692 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
693 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
694 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
695 
696 	/* Test case 3: padding available. Expect: PASS */
697 	memset(&pdu, 0, sizeof(pdu));
698 	tqpair.flags.host_hdgst_enable = 1;
699 	tqpair.flags.host_ddgst_enable = 1;
700 	tqpair.cpda = SPDK_NVME_TCP_CPDA_MAX;
701 
702 	pdo = plen = (SPDK_NVME_TCP_CPDA_MAX + 1) << 2;
703 	plen += tcp_req.req->payload_size;
704 	plen += SPDK_NVME_TCP_DIGEST_LEN;
705 
706 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
707 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
708 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
709 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
710 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
711 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
712 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
713 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
714 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
715 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
716 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
717 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
718 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
719 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
720 }
721 
722 /* Just a stub, nothing to do */
723 static void
724 ut_nvme_tcp_qpair_xfer_complete_cb(void *cb_arg)
725 {
726 	return;
727 }
728 
729 static void
730 test_nvme_tcp_qpair_write_pdu(void)
731 {
732 	struct nvme_tcp_qpair tqpair = {};
733 	struct spdk_nvme_tcp_stat stats = {};
734 	struct nvme_request req = {};
735 	struct nvme_tcp_req treq = { .req = &req };
736 	struct nvme_tcp_pdu pdu = { .req = &treq };
737 	void *cb_arg = (void *)0xDEADBEEF;
738 	char iov_base0[4096];
739 	char iov_base1[4096];
740 
741 	memset(iov_base0, 0xFF, 4096);
742 	memset(iov_base1, 0xFF, 4096);
743 	pdu.data_len = 4096 * 2;
744 	pdu.padding_len = 0;
745 	pdu.data_iov[0].iov_base = (void *)iov_base0;
746 	pdu.data_iov[0].iov_len = 4096;
747 	pdu.data_iov[1].iov_base = (void *)iov_base1;
748 	pdu.data_iov[1].iov_len = 4096;
749 	pdu.data_iovcnt = 2;
750 	TAILQ_INIT(&tqpair.send_queue);
751 
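	/* nvme_tcp_qpair_write_pdu() fills in the digests and the flat iovec view of the PDU
	 * and queues it on tqpair->send_queue.  With hdgst enabled, the crc32c header digest
	 * is written right after the header in hdr.raw (checked via hdr.raw[hlen]); with ddgst
	 * enabled, the data digest is computed into pdu->data_digest and appended as the last
	 * iovec.
	 */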
752 	/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
753 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
754 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
755 
756 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
757 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
758 	pdu.hdr.common.plen = pdu.hdr.common.hlen +
759 			      SPDK_NVME_TCP_DIGEST_LEN * 2;
760 	pdu.hdr.common.plen += pdu.data_len;
761 	tqpair.flags.host_hdgst_enable = 1;
762 	tqpair.flags.host_ddgst_enable = 1;
763 	tqpair.stats = &stats;
764 
765 	nvme_tcp_qpair_write_pdu(&tqpair,
766 				 &pdu,
767 				 ut_nvme_tcp_qpair_xfer_complete_cb,
768 				 cb_arg);
769 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
770 	/* Check that the header digest CRC was written into the raw header buffer */
771 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen]);
772 	CU_ASSERT(pdu.data_digest[0]);
773 	CU_ASSERT(pdu.sock_req.iovcnt == 4);
774 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
775 	CU_ASSERT(pdu.iov[0].iov_len == (sizeof(struct spdk_nvme_tcp_cmd) +
776 					 SPDK_NVME_TCP_DIGEST_LEN));
777 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
778 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
779 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
780 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
781 	CU_ASSERT(pdu.iov[3].iov_base == &pdu.data_digest);
782 	CU_ASSERT(pdu.iov[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
783 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
784 	CU_ASSERT(pdu.cb_arg == cb_arg);
785 	CU_ASSERT(pdu.qpair == &tqpair);
786 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
787 
788 	/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
789 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
790 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
791 
792 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
793 	pdu.hdr.common.plen = pdu.hdr.common.hlen + pdu.data_len;
794 	tqpair.flags.host_hdgst_enable = 0;
795 	tqpair.flags.host_ddgst_enable = 0;
796 
797 	nvme_tcp_qpair_write_pdu(&tqpair,
798 				 &pdu,
799 				 ut_nvme_tcp_qpair_xfer_complete_cb,
800 				 cb_arg);
801 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
802 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen] == 0);
803 	CU_ASSERT(pdu.data_digest[0] == 0);
804 	CU_ASSERT(pdu.sock_req.iovcnt == 3);
805 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
806 	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd));
807 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
808 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
809 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
810 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
811 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
812 	CU_ASSERT(pdu.cb_arg == cb_arg);
813 	CU_ASSERT(pdu.qpair == &tqpair);
814 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
815 }
816 
817 static void
818 test_nvme_tcp_qpair_set_recv_state(void)
819 {
820 	struct nvme_tcp_qpair tqpair = {};
821 
822 	/* case1: The recv state of tqpair is the same as the state to be set */
823 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
824 	nvme_tcp_qpair_set_recv_state(&tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
825 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
826 
827 	/* case2: A different state is set accordingly */
828 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY;
829 	nvme_tcp_qpair_set_recv_state(&tqpair, 0xff);
830 	CU_ASSERT(tqpair.recv_state == 0xff);
831 }
832 
833 static void
834 test_nvme_tcp_alloc_reqs(void)
835 {
836 	struct nvme_tcp_qpair tqpair = {};
837 	int rc = 0;
838 
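	/* nvme_tcp_alloc_reqs() allocates num_entries tcp_reqs (cid == array index), each
	 * paired with send_pdus[i], plus one extra PDU at send_pdus[num_entries] that backs
	 * tqpair->send_pdu.
	 */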
839 	/* case1: single entry. Expect: PASS */
840 	tqpair.num_entries = 1;
841 	rc = nvme_tcp_alloc_reqs(&tqpair);
842 	CU_ASSERT(rc == 0);
843 	CU_ASSERT(tqpair.tcp_reqs[0].cid == 0);
844 	CU_ASSERT(tqpair.tcp_reqs[0].tqpair == &tqpair);
845 	CU_ASSERT(tqpair.tcp_reqs[0].pdu == &tqpair.send_pdus[0]);
846 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
847 	free(tqpair.tcp_reqs);
848 	spdk_free(tqpair.send_pdus);
849 
850 	/* case2: multiple entries. Expect: PASS */
851 	tqpair.num_entries = 5;
852 	rc = nvme_tcp_alloc_reqs(&tqpair);
853 	CU_ASSERT(rc == 0);
854 	for (int i = 0; i < tqpair.num_entries; i++) {
855 		CU_ASSERT(tqpair.tcp_reqs[i].cid == i);
856 		CU_ASSERT(tqpair.tcp_reqs[i].tqpair == &tqpair);
857 		CU_ASSERT(tqpair.tcp_reqs[i].pdu == &tqpair.send_pdus[i]);
858 	}
859 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
860 
861 	/* case3: nvme_tcp_free_reqs. Expect: PASS */
862 	nvme_tcp_free_reqs(&tqpair);
863 	CU_ASSERT(tqpair.tcp_reqs == NULL);
864 	CU_ASSERT(tqpair.send_pdus == NULL);
865 }
866 
867 static void
868 test_nvme_tcp_qpair_send_h2c_term_req(void)
869 {
870 	struct nvme_tcp_qpair tqpair = {};
871 	struct spdk_nvme_tcp_stat stats = {};
872 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
873 	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
874 	uint32_t error_offset = 1;
875 
876 	tqpair.send_pdu = &send_pdu;
877 	tqpair.recv_pdu = &recv_pdu;
878 	tqpair.stats = &stats;
879 	TAILQ_INIT(&tqpair.send_queue);
880 	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
881 	pdu.hdr.common.hlen = 64;
882 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
883 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
884 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
885 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
886 		  pdu.hdr.common.hlen);
887 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
888 
889 	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
890 	pdu.hdr.common.hlen = 255;
891 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
892 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
893 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
894 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == (unsigned)
895 		  tqpair.send_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
896 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
897 }
898 
899 static void
900 test_nvme_tcp_pdu_ch_handle(void)
901 {
902 	struct nvme_tcp_qpair tqpair = {};
903 	struct spdk_nvme_tcp_stat stats = {};
904 	struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};
905 
906 	tqpair.send_pdu = &send_pdu;
907 	tqpair.recv_pdu = &recv_pdu;
908 	tqpair.stats = &stats;
909 	TAILQ_INIT(&tqpair.send_queue);
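	/* nvme_tcp_pdu_ch_handle() validates the common header of a received PDU.  On error it
	 * queues an H2C termination request and moves recv_state to QUIESCING; fei[0] records
	 * the offset of the offending field within spdk_nvme_tcp_common_pdu_hdr (2 for hlen,
	 * 4 for plen).  On success it moves on to AWAIT_PDU_PSH.
	 */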
910 	/* case 1: Already received IC_RESP PDU. Expect: fail */
911 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
912 	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
913 	nvme_tcp_pdu_ch_handle(&tqpair);
914 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
915 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
916 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
917 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
918 
919 	/* case 2: Expected and received PDU header lengths differ. Expect: fail */
920 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
921 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
922 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
923 	tqpair.recv_pdu->hdr.common.hlen = 0;
924 	nvme_tcp_pdu_ch_handle(&tqpair);
925 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
926 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
927 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
928 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
929 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);
930 
931 	/* case 3: The tqpair's TCP/IP connection has not been negotiated yet. Expect: fail */
932 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
933 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
934 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
935 	tqpair.recv_pdu->hdr.common.hlen = 0;
936 	nvme_tcp_pdu_ch_handle(&tqpair);
937 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
938 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
939 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
940 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
941 
942 	/* case 4: Unexpected PDU type. Expect: fail */
943 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
944 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
945 	tqpair.recv_pdu->hdr.common.plen = 0;
946 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
947 	nvme_tcp_pdu_ch_handle(&tqpair);
948 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
949 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
950 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
951 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
952 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
953 
954 	/* case 5: plen error. Expect: fail */
955 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
956 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
957 	tqpair.recv_pdu->hdr.common.plen = 0;
958 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
959 	nvme_tcp_pdu_ch_handle(&tqpair);
960 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
961 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
962 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
963 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
964 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
965 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
966 
967 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
968 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
969 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
970 	tqpair.recv_pdu->hdr.common.plen = 0;
971 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
972 	nvme_tcp_pdu_ch_handle(&tqpair);
973 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
974 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
975 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
976 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
977 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
978 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
979 
980 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
981 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
982 	tqpair.recv_pdu->hdr.common.plen = 0;
983 	tqpair.recv_pdu->hdr.common.pdo = 64;
984 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
985 	nvme_tcp_pdu_ch_handle(&tqpair);
986 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
987 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
988 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
989 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
990 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
991 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
992 
993 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
994 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
995 	tqpair.recv_pdu->hdr.common.plen = 0;
996 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
997 	nvme_tcp_pdu_ch_handle(&tqpair);
998 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
999 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1000 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1001 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1002 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1003 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1004 
1005 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
1006 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1007 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1008 	tqpair.recv_pdu->hdr.common.plen = 0;
1009 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
1010 	nvme_tcp_pdu_ch_handle(&tqpair);
1011 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1012 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1013 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1014 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1015 		  (unsigned)sizeof(struct spdk_nvme_tcp_r2t_hdr));
1016 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1017 
1018 	/* case 6: Valid IC_RESP PDU. Expect: PASS */
1019 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1020 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
1021 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1022 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1023 	nvme_tcp_pdu_ch_handle(&tqpair);
1024 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1025 	CU_ASSERT(tqpair.recv_pdu->psh_len == tqpair.recv_pdu->hdr.common.hlen - sizeof(
1026 			  struct spdk_nvme_tcp_common_pdu_hdr));
1027 }
1028 
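/* Mock of spdk_sock_connect_ext(): verifies that nvme_tcp_qpair_connect_sock() forwards the
 * address, port and socket options set up in test_nvme_tcp_qpair_connect_sock() below, and
 * returns a dummy socket pointer.
 */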
1029 DEFINE_RETURN_MOCK(spdk_sock_connect_ext, struct spdk_sock *);
1030 struct spdk_sock *
1031 spdk_sock_connect_ext(const char *ip, int port,
1032 		      const char *_impl_name, struct spdk_sock_opts *opts)
1033 {
1034 	HANDLE_RETURN_MOCK(spdk_sock_connect_ext);
1035 	CU_ASSERT(port == 23);
1036 	CU_ASSERT(opts->opts_size == sizeof(*opts));
1037 	CU_ASSERT(opts->priority == 1);
1038 	CU_ASSERT(opts->zcopy == true);
1039 	CU_ASSERT(!strcmp(ip, "192.168.1.78"));
1040 	return (struct spdk_sock *)0xDDADBEEF;
1041 }
1042 
1043 static void
1044 test_nvme_tcp_qpair_connect_sock(void)
1045 {
1046 	struct nvme_tcp_ctrlr tctrlr = {};
1047 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1048 	struct nvme_tcp_qpair tqpair = {};
1049 	int rc;
1050 
1051 	tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1052 	tqpair.qpair.id = 1;
1053 	tqpair.qpair.poll_group = (void *)0xDEADBEEF;
1054 	ctrlr->trid.priority = 1;
1055 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1056 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1057 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1058 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1059 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1060 
1061 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1062 	CU_ASSERT(rc == 0);
1063 
1064 	/* Unsupported family of the transport address */
1065 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IB;
1066 
1067 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1068 	SPDK_CU_ASSERT_FATAL(rc == -1);
1069 
1070 	/* Invalid dst_port, INT_MAX is 2147483647 */
1071 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1072 	memcpy(ctrlr->trid.trsvcid, "2147483647", sizeof("2147483647"));
1073 
1074 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1075 	SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
1076 
1077 	/* Parse invalid address */
1078 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1079 	memcpy(ctrlr->trid.traddr, "192.168.1.256", sizeof("192.168.1.256"));
1080 
1081 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1082 	SPDK_CU_ASSERT_FATAL(rc != 0);
1083 }
1084 
1085 static void
1086 test_nvme_tcp_qpair_icreq_send(void)
1087 {
1088 	struct nvme_tcp_qpair tqpair = {};
1089 	struct spdk_nvme_tcp_stat stats = {};
1090 	struct spdk_nvme_ctrlr ctrlr = {};
1091 	struct nvme_tcp_pdu pdu = {};
1092 	struct nvme_tcp_poll_group poll_group = {};
1093 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1094 	int rc;
1095 
1096 	tqpair.send_pdu = &pdu;
1097 	tqpair.qpair.ctrlr = &ctrlr;
1098 	tqpair.qpair.poll_group = &poll_group.group;
1099 	tqpair.stats = &stats;
1100 	ic_req = &pdu.hdr.ic_req;
1101 
1102 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1103 	tqpair.qpair.ctrlr->opts.header_digest = true;
1104 	tqpair.qpair.ctrlr->opts.data_digest = true;
1105 	TAILQ_INIT(&tqpair.send_queue);
1106 
1107 	rc = nvme_tcp_qpair_icreq_send(&tqpair);
1108 	CU_ASSERT(rc == 0);
1109 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1110 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1111 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1112 	CU_ASSERT(ic_req->pfv == 0);
1113 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1114 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1115 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1116 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1117 }
1118 
1119 static void
1120 test_nvme_tcp_c2h_payload_handle(void)
1121 {
1122 	struct nvme_tcp_qpair tqpair = {};
1123 	struct spdk_nvme_tcp_stat stats = {};
1124 	struct nvme_tcp_pdu pdu = {};
1125 	struct nvme_tcp_req tcp_req = {};
1126 	struct nvme_request	req = {};
1127 	struct nvme_tcp_pdu recv_pdu = {};
1128 	uint32_t reaped = 1;
1129 
1130 	tcp_req.req = &req;
1131 	tcp_req.req->qpair = &tqpair.qpair;
1132 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1133 	tcp_req.tqpair = &tqpair;
1134 	tcp_req.cid = 1;
1135 	tqpair.stats = &stats;
1136 
1137 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1138 
1139 	pdu.req = &tcp_req;
1140 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1141 					SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1142 	pdu.data_len = 1024;
1143 
1144 	tqpair.qpair.id = 1;
1145 	tqpair.recv_pdu = &recv_pdu;
1146 
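	/* When the C2H data PDU carries the SUCCESS flag, no separate capsule response follows:
	 * nvme_tcp_c2h_data_payload_handle() synthesizes the completion itself (cid and sqid
	 * taken from the request and qpair) and bumps *reaped.  Without the flag (case 3) the
	 * request stays outstanding until the capsule response arrives.
	 */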
1147 	/* case 1: nvme_tcp_c2h_data_payload_handle: tcp_req->datao != tcp_req->req->payload_size */
1148 	tcp_req.datao = 1024;
1149 	tcp_req.req->payload_size = 2048;
1150 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1151 	tcp_req.ordering.bits.send_ack = 1;
1152 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1153 	tcp_req.ordering.bits.data_recv = 0;
1154 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1155 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1156 	tqpair.qpair.num_outstanding_reqs = 1;
1157 
1158 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1159 
1160 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1161 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1162 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1163 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1164 	CU_ASSERT(reaped == 2);
1165 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1166 
1167 	/* case 2: nvme_tcp_c2h_data_payload_handle: tcp_req->datao == tcp_req->req->payload_size */
1168 	tcp_req.datao = 1024;
1169 	tcp_req.req->payload_size = 1024;
1170 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1171 	tcp_req.ordering.bits.send_ack = 1;
1172 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1173 	tcp_req.ordering.bits.data_recv = 0;
1174 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1175 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1176 	tqpair.qpair.num_outstanding_reqs = 1;
1177 
1178 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1179 
1180 	CU_ASSERT(tcp_req.rsp.status.p == 1);
1181 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1182 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1183 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1184 	CU_ASSERT(reaped == 3);
1185 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1186 
1187 	/* case 3: nvme_tcp_c2h_data_payload_handle: flag does not have SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS */
1188 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1189 	tcp_req.datao = 1024;
1190 	tcp_req.req->payload_size = 1024;
1191 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1192 	tcp_req.ordering.bits.send_ack = 1;
1193 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1194 	tcp_req.ordering.bits.data_recv = 0;
1195 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1196 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1197 	tqpair.qpair.num_outstanding_reqs = 1;
1198 
1199 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1200 
1201 	CU_ASSERT(reaped == 3);
1202 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1203 
1204 	/* case 4: nvme_tcp_c2h_term_req_payload_handle: recv_state is NVME_TCP_PDU_RECV_STATE_ERROR */
1205 	pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1206 	nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);
1207 
1208 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1209 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1210 }
1211 
1212 static void
1213 test_nvme_tcp_icresp_handle(void)
1214 {
1215 	struct nvme_tcp_qpair tqpair = {};
1216 	struct spdk_nvme_tcp_stat stats = {};
1217 	struct nvme_tcp_pdu pdu = {};
1218 	struct nvme_tcp_pdu send_pdu = {};
1219 	struct nvme_tcp_pdu recv_pdu = {};
1220 
1221 	tqpair.send_pdu = &send_pdu;
1222 	tqpair.recv_pdu = &recv_pdu;
1223 	tqpair.stats = &stats;
1224 	TAILQ_INIT(&tqpair.send_queue);
1225 
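	/* nvme_tcp_icresp_handle() validates the negotiated parameters (pfv, maxh2cdata, cpda)
	 * and quiesces the qpair on any mismatch.  On success it latches maxh2cdata, cpda and
	 * the digest settings; the qpair only advances to FABRIC_CONNECT_SEND once the icreq
	 * send has been acknowledged (icreq_send_ack), otherwise it stays INITIALIZING.
	 */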
1226 	/* case 1: Received ICResp PFV differs from the expected value. */
1227 	pdu.hdr.ic_resp.pfv = 1;
1228 
1229 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1230 
1231 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1232 
1233 	/* case 2: Received ICResp maxh2cdata is smaller than the allowed minimum. */
1234 	pdu.hdr.ic_resp.pfv = 0;
1235 	pdu.hdr.ic_resp.maxh2cdata = 2048;
1236 
1237 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1238 
1239 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1240 
1241 	/* case 3: Received ICResp cpda exceeds the allowed maximum. */
1242 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1243 	pdu.hdr.ic_resp.cpda = 64;
1244 
1245 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1246 
1247 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1248 
1249 	/* case 4: Valid ICResp, but still waiting for the icreq ack. */
1250 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1251 	pdu.hdr.ic_resp.cpda = 30;
1252 	pdu.hdr.ic_resp.dgst.bits.hdgst_enable = true;
1253 	pdu.hdr.ic_resp.dgst.bits.ddgst_enable = true;
1254 	tqpair.flags.icreq_send_ack = 0;
1255 
1256 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1257 
1258 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1259 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_INITIALIZING);
1260 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1261 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1262 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1263 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1264 
1265 	/* case 5: icreq ack already received. Expect: PASS. */
1266 	tqpair.flags.icreq_send_ack = 1;
1267 
1268 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1269 
1270 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1271 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND);
1272 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1273 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1274 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1275 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1276 }
1277 
1278 static void
1279 test_nvme_tcp_pdu_payload_handle(void)
1280 {
1281 	struct nvme_tcp_qpair	tqpair = {};
1282 	struct spdk_nvme_tcp_stat	stats = {};
1283 	struct nvme_tcp_pdu	recv_pdu = {};
1284 	struct nvme_tcp_req	tcp_req = {};
1285 	struct nvme_request	req = {};
1286 	uint32_t		reaped = 0;
1287 
1288 	tqpair.recv_pdu = &recv_pdu;
1289 	tcp_req.tqpair = &tqpair;
1290 	tcp_req.req = &req;
1291 	tcp_req.req->qpair = &tqpair.qpair;
1292 	tqpair.stats = &stats;
1293 
1294 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1295 	tqpair.qpair.id = 1;
1296 	recv_pdu.ddgst_enable = false;
1297 	recv_pdu.req = &tcp_req;
1298 	recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1299 					     SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1300 	recv_pdu.data_len = 1024;
1301 	tcp_req.ordering.bits.data_recv = 0;
1302 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1303 	tcp_req.cid = 1;
1304 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1305 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1306 	tqpair.qpair.num_outstanding_reqs = 1;
1307 
1308 	/* C2H_DATA */
1309 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
1310 	tcp_req.datao = 1024;
1311 	tcp_req.req->payload_size = 2048;
1312 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1313 	tcp_req.ordering.bits.send_ack = 1;
1314 
1315 	recv_pdu.req = &tcp_req;
1316 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1317 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1318 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1319 	CU_ASSERT(tcp_req.rsp.cid == 1);
1320 	CU_ASSERT(tcp_req.rsp.sqid == 1);
1321 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1322 	CU_ASSERT(reaped == 1);
1323 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1324 
1325 	/* TermResp */
1326 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1327 	recv_pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1328 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1329 
1330 	recv_pdu.req = &tcp_req;
1331 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1332 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1333 }
1334 
1335 static void
1336 test_nvme_tcp_capsule_resp_hdr_handle(void)
1337 {
1338 	struct nvme_tcp_qpair	tqpair = {};
1339 	struct spdk_nvme_ctrlr	ctrlr = {};
1340 	struct spdk_nvme_tcp_stat	stats = {};
1341 	struct nvme_request	req = {};
1342 	struct spdk_nvme_cpl	rccqe_tgt = {};
1343 	struct nvme_tcp_req	*tcp_req = NULL;
1344 	uint32_t		reaped = 0;
1345 	int			rc;
1346 
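	/* nvme_tcp_capsule_resp_hdr_handle() looks up the tcp_req whose cid matches the received
	 * cqe, copies the cqe into tcp_req->rsp and completes the request; a cid that does not
	 * match any outstanding request is fatal and quiesces the qpair.
	 */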
1347 	/* Initialize requests and pdus */
1348 	tqpair.num_entries = 1;
1349 	tqpair.stats = &stats;
1350 	req.qpair = &tqpair.qpair;
1351 	req.qpair->ctrlr = &ctrlr;
1352 	req.payload = NVME_PAYLOAD_CONTIG(NULL, NULL);
1353 
1354 	rc = nvme_tcp_alloc_reqs(&tqpair);
1355 	SPDK_CU_ASSERT_FATAL(rc == 0);
1356 	tcp_req = nvme_tcp_req_get(&tqpair);
1357 	SPDK_CU_ASSERT_FATAL(tcp_req != NULL);
1358 	rc = nvme_tcp_req_init(&tqpair, &req, tcp_req);
1359 	SPDK_CU_ASSERT_FATAL(rc == 0);
1360 	tcp_req->ordering.bits.send_ack = 1;
1361 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1362 	/* tqpair.recv_pdu will be reset after handling */
1363 	memset(&rccqe_tgt, 0xff, sizeof(rccqe_tgt));
1364 	rccqe_tgt.cid = 0;
1365 	memcpy(&tqpair.recv_pdu->hdr.capsule_resp.rccqe, &rccqe_tgt, sizeof(rccqe_tgt));
1366 	tqpair.qpair.num_outstanding_reqs = 1;
1367 
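	/* Matching CID: the CQE is copied into tcp_req->rsp and the request is completed (reaped) */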
1368 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1369 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1370 	CU_ASSERT(!memcmp(&tcp_req->rsp, &rccqe_tgt, sizeof(rccqe_tgt)));
1371 	CU_ASSERT(tcp_req->ordering.bits.data_recv == 1);
1372 	CU_ASSERT(reaped == 1);
1373 	CU_ASSERT(TAILQ_EMPTY(&tcp_req->tqpair->outstanding_reqs));
1374 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1375 
1376 	/* No tcp request matches the CID, expect failure and a transition to the quiescing state */
1377 	reaped = 0;
1378 	tqpair.recv_pdu->hdr.capsule_resp.rccqe.cid = 1;
1379 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1380 
1381 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1382 	CU_ASSERT(reaped == 0);
1383 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1384 	nvme_tcp_free_reqs(&tqpair);
1385 }
1386 
1387 static void
1388 test_nvme_tcp_ctrlr_connect_qpair(void)
1389 {
1390 	struct spdk_nvme_ctrlr ctrlr = {};
1391 	struct spdk_nvme_qpair *qpair;
1392 	struct nvme_tcp_qpair *tqpair;
1393 	struct nvme_tcp_pdu pdu = {};
1394 	struct nvme_tcp_pdu recv_pdu = {};
1395 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1396 	int rc;
1397 
1398 	tqpair = calloc(1, sizeof(*tqpair));
1399 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1400 	tqpair->recv_pdu = &recv_pdu;
1401 	qpair = &tqpair->qpair;
1402 	tqpair->sock = (struct spdk_sock *)0xDEADBEEF;
1403 	tqpair->send_pdu = &pdu;
1404 	tqpair->qpair.ctrlr = &ctrlr;
1405 	tqpair->qpair.state = NVME_QPAIR_CONNECTING;
1406 	tqpair->num_entries = 128;
1407 	ic_req = &pdu.hdr.ic_req;
1408 
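	/* Stage an ICResp PDU in the receive buffer so the connect state machine can finish the initialization handshake */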
1409 	tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1410 	tqpair->recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1411 	tqpair->recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1412 	tqpair->recv_pdu->ch_valid_bytes = sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1413 	tqpair->recv_pdu->psh_valid_bytes = tqpair->recv_pdu->hdr.common.hlen -
1414 					    sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1415 	tqpair->recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
1416 	tqpair->recv_pdu->hdr.ic_resp.cpda = 1;
1417 	tqpair->flags.icreq_send_ack = 1;
1418 	tqpair->qpair.ctrlr->opts.header_digest = true;
1419 	tqpair->qpair.ctrlr->opts.data_digest = true;
1420 	TAILQ_INIT(&tqpair->send_queue);
1421 
1422 	rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
1423 	CU_ASSERT(rc == 0);
1424 
1425 	/* skip NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY state */
1426 	/* assume already received the icresp */
1427 	tqpair->recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1428 
1429 	while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
1430 		rc = nvme_tcp_qpair_process_completions(qpair, 0);
1431 		CU_ASSERT(rc >= 0);
1432 	}
1433 
1434 	CU_ASSERT(tqpair->maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
1435 	CU_ASSERT(tqpair->state == NVME_TCP_QPAIR_STATE_RUNNING);
1436 	CU_ASSERT(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1437 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1438 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1439 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1440 	CU_ASSERT(ic_req->pfv == 0);
1441 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1442 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1443 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1444 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1445 
1446 	nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1447 }
1448 
1449 static void
1450 ut_disconnect_qpair_req_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
1451 {
1452 	CU_ASSERT_EQUAL(cpl->status.sc, SPDK_NVME_SC_ABORTED_SQ_DELETION);
1453 	CU_ASSERT_EQUAL(cpl->status.sct, SPDK_NVME_SCT_GENERIC);
1454 }
1455 
1456 static void
1457 ut_disconnect_qpair_poll_group_cb(struct spdk_nvme_qpair *qpair, void *ctx)
1458 {
1459 	int *disconnected = ctx;
1460 
1461 	(*disconnected)++;
1462 }
1463 
1464 static void
1465 test_nvme_tcp_ctrlr_disconnect_qpair(void)
1466 {
1467 	struct spdk_nvme_ctrlr ctrlr = {};
1468 	struct spdk_nvme_qpair *qpair;
1469 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {};
1470 	struct nvme_tcp_qpair tqpair = {
1471 		.qpair = {
1472 			.trtype = SPDK_NVME_TRANSPORT_TCP,
1473 			.ctrlr = &ctrlr,
1474 			.async = true,
1475 		},
1476 		.recv_pdu = &recv_pdu,
1477 	};
1478 	struct spdk_nvme_poll_group group = {};
1479 	struct nvme_tcp_poll_group tgroup = { .group.group = &group };
1480 	struct nvme_request req = { .qpair = &tqpair.qpair, .cb_fn = ut_disconnect_qpair_req_cb };
1481 	struct nvme_tcp_req treq = { .req = &req, .tqpair = &tqpair };
1482 	int rc, disconnected;
1483 
1484 	qpair = &tqpair.qpair;
1485 	qpair->poll_group = &tgroup.group;
1486 	tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
1487 	tqpair.needs_poll = true;
1488 	TAILQ_INIT(&tgroup.needs_poll);
1489 	STAILQ_INIT(&tgroup.group.disconnected_qpairs);
1490 	TAILQ_INIT(&tqpair.send_queue);
1491 	TAILQ_INIT(&tqpair.free_reqs);
1492 	TAILQ_INIT(&tqpair.outstanding_reqs);
1493 	TAILQ_INSERT_TAIL(&tgroup.needs_poll, &tqpair, link);
1494 	TAILQ_INSERT_TAIL(&tqpair.send_queue, &pdu, tailq);
1495 
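	/* Basic disconnect: needs_poll is cleared, the socket is closed and queued PDUs are dropped */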
1496 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1497 
1498 	CU_ASSERT(tqpair.needs_poll == false);
1499 	CU_ASSERT(tqpair.sock == NULL);
1500 	CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue) == true);
1501 
1502 	/* Check that outstanding requests are aborted */
1503 	treq.state = NVME_TCP_REQ_ACTIVE;
1504 	qpair->num_outstanding_reqs = 1;
1505 	qpair->state = NVME_QPAIR_DISCONNECTING;
1506 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1507 
1508 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1509 
1510 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
1511 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 0);
1512 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.free_reqs));
1513 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1514 
1515 	/* Check that a request with an accel operation in progress won't be aborted until that
1516 	 * operation is completed */
1517 	treq.state = NVME_TCP_REQ_ACTIVE;
1518 	treq.ordering.bits.in_progress_accel = 1;
1519 	qpair->poll_group = NULL;
1520 	qpair->num_outstanding_reqs = 1;
1521 	qpair->state = NVME_QPAIR_DISCONNECTING;
1522 	TAILQ_REMOVE(&tqpair.free_reqs, &treq, link);
1523 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1524 
1525 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1526 
1527 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1528 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1529 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1530 
1531 	/* Check that a qpair will be transitioned to a DISCONNECTED state only once the accel
1532 	 * operation is completed */
1533 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1534 	CU_ASSERT_EQUAL(rc, 0);
1535 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1536 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1537 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1538 
1539 	treq.ordering.bits.in_progress_accel = 0;
1540 	qpair->num_outstanding_reqs = 0;
1541 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1542 
1543 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1544 	CU_ASSERT_EQUAL(rc, -ENXIO);
1545 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1546 
1547 	/* Check the same scenario but this time with spdk_sock_flush() returning errors */
1548 	treq.state = NVME_TCP_REQ_ACTIVE;
1549 	treq.ordering.bits.in_progress_accel = 1;
1550 	qpair->num_outstanding_reqs = 1;
1551 	qpair->state = NVME_QPAIR_DISCONNECTING;
1552 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1553 
1554 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1555 
1556 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1557 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1558 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1559 
1560 	MOCK_SET(spdk_sock_flush, -ENOTCONN);
1561 	treq.ordering.bits.in_progress_accel = 0;
1562 	qpair->num_outstanding_reqs = 0;
1563 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1564 
1565 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1566 	CU_ASSERT_EQUAL(rc, 0);
1567 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1568 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1569 	CU_ASSERT_EQUAL(rc, -ENXIO);
1570 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1571 	MOCK_CLEAR(spdk_sock_flush);
1572 
1573 	/* Now check the same scenario, but with a qpair that's part of a poll group */
1574 	disconnected = 0;
1575 	group.ctx = &disconnected;
1576 	treq.state = NVME_TCP_REQ_ACTIVE;
1577 	treq.ordering.bits.in_progress_accel = 1;
1578 	qpair->poll_group = &tgroup.group;
1579 	qpair->num_outstanding_reqs = 1;
1580 	qpair->state = NVME_QPAIR_DISCONNECTING;
1581 	STAILQ_INSERT_TAIL(&tgroup.group.disconnected_qpairs, qpair, poll_group_stailq);
1582 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1583 
1584 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1585 						ut_disconnect_qpair_poll_group_cb);
1586 	/* While a request is still outstanding, disconnect_cb shouldn't be executed */
1587 	CU_ASSERT_EQUAL(disconnected, 0);
1588 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1589 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1590 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1591 
1592 	treq.ordering.bits.in_progress_accel = 0;
1593 	qpair->num_outstanding_reqs = 0;
1594 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1595 
1596 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1597 						ut_disconnect_qpair_poll_group_cb);
1598 	CU_ASSERT_EQUAL(disconnected, 1);
1599 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1600 
1601 	/* Check that a non-async qpair is marked as disconnected immediately */
1602 	qpair->poll_group = NULL;
1603 	qpair->state = NVME_QPAIR_DISCONNECTING;
1604 	qpair->async = false;
1605 
1606 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1607 
1608 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1609 }
1610 
1611 static void
1612 test_nvme_tcp_ctrlr_create_io_qpair(void)
1613 {
1614 	struct spdk_nvme_qpair *qpair = NULL;
1615 	struct nvme_tcp_ctrlr tctrlr = {};
1616 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1617 	uint16_t qid = 1;
1618 	struct spdk_nvme_io_qpair_opts opts = {
1619 		.io_queue_size = 2,
1620 		.qprio = SPDK_NVME_QPRIO_URGENT,
1621 		.io_queue_requests = 1,
1622 	};
1623 	struct nvme_tcp_qpair *tqpair;
1624 
1625 	ctrlr->trid.priority = 1;
1626 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1627 	memset(ctrlr->opts.psk, 0, sizeof(ctrlr->opts.psk));
1628 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1629 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1630 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1631 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1632 
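	/* io_queue_size of 2 should yield num_entries == 1 (one entry is reserved) */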
1633 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1634 	tqpair = nvme_tcp_qpair(qpair);
1635 
1636 	CU_ASSERT(qpair != NULL);
1637 	CU_ASSERT(qpair->id == 1);
1638 	CU_ASSERT(qpair->ctrlr == ctrlr);
1639 	CU_ASSERT(qpair->qprio == SPDK_NVME_QPRIO_URGENT);
1640 	CU_ASSERT(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
1641 	CU_ASSERT(qpair->poll_group == (void *)0xDEADBEEF);
1642 	CU_ASSERT(tqpair->num_entries == 1);
1643 
1644 	free(tqpair->tcp_reqs);
1645 	spdk_free(tqpair->send_pdus);
1646 	free(tqpair);
1647 
1648 	/* Max queue size shall pass */
1649 	opts.io_queue_size = 0xffff;
1650 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1651 	tqpair = nvme_tcp_qpair(qpair);
1652 
1653 	CU_ASSERT(qpair != NULL);
1654 	CU_ASSERT(tqpair->num_entries == 0xfffe);
1655 
1656 	free(tqpair->tcp_reqs);
1657 	spdk_free(tqpair->send_pdus);
1658 	free(tqpair);
1659 
1660 	/* Queue size 0 shall fail */
1661 	opts.io_queue_size = 0;
1662 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1663 	CU_ASSERT(qpair == NULL);
1664 
1665 	/* Queue size 1 shall fail */
1666 	opts.io_queue_size = 1;
1667 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1668 	CU_ASSERT(qpair == NULL);
1669 }
1670 
1671 static void
1672 test_nvme_tcp_ctrlr_delete_io_qpair(void)
1673 {
1674 	struct spdk_nvme_ctrlr	ctrlr = {};
1675 	struct spdk_nvme_qpair *qpair;
1676 	struct nvme_tcp_qpair *tqpair;
1677 	struct nvme_tcp_req tcp_req = {};
1678 	struct nvme_request	req = {};
1679 	int rc;
1680 
1681 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
1682 	tqpair->tcp_reqs = calloc(1, sizeof(struct nvme_tcp_req));
1683 	tqpair->send_pdus = calloc(1, sizeof(struct nvme_tcp_pdu));
1684 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1685 	qpair = &tqpair->qpair;
1686 	qpair->ctrlr = &ctrlr;
1687 	tcp_req.req = &req;
1688 	tcp_req.req->qpair = &tqpair->qpair;
1689 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1690 	tcp_req.tqpair = tqpair;
1691 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1692 	TAILQ_INIT(&tqpair->outstanding_reqs);
1693 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1694 	qpair->num_outstanding_reqs = 1;
1695 
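	/* Deleting the qpair is expected to abort the outstanding request and free the qpair's resources */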
1696 	rc = nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1697 
1698 	CU_ASSERT(rc == 0);
1699 }
1700 
1701 static void
1702 test_nvme_tcp_poll_group_get_stats(void)
1703 {
1704 	int rc = 0;
1705 	struct spdk_sock_group sgroup = {};
1706 	struct nvme_tcp_poll_group *pgroup = NULL;
1707 	struct spdk_nvme_transport_poll_group *tgroup = NULL;
1708 	struct spdk_nvme_transport_poll_group_stat *tgroup_stat = NULL;
1709 
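	/* Create a poll group backed by a mocked socket group */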
1710 	MOCK_SET(spdk_sock_group_create, &sgroup);
1711 	tgroup = nvme_tcp_poll_group_create();
1712 	CU_ASSERT(tgroup != NULL);
1713 	pgroup = nvme_tcp_poll_group(tgroup);
1714 	CU_ASSERT(pgroup != NULL);
1715 
1716 	/* Invalid group pointer, expect failure with -EINVAL */
1717 	rc = nvme_tcp_poll_group_get_stats(NULL, &tgroup_stat);
1718 	CU_ASSERT(rc == -EINVAL);
1719 	CU_ASSERT(tgroup_stat == NULL);
1720 
1721 	/* Invalid stats pointer, expect failure with -EINVAL */
1722 	rc = nvme_tcp_poll_group_get_stats(tgroup, NULL);
1723 	CU_ASSERT(rc == -EINVAL);
1724 
1725 	/* Get stats success */
1726 	rc = nvme_tcp_poll_group_get_stats(tgroup, &tgroup_stat);
1727 	CU_ASSERT(rc == 0);
1728 	CU_ASSERT(tgroup_stat != NULL);
1729 	CU_ASSERT(tgroup_stat->trtype == SPDK_NVME_TRANSPORT_TCP);
1730 	CU_ASSERT(memcmp(&tgroup_stat->tcp, &pgroup->stats, sizeof(struct spdk_nvme_tcp_stat)) == 0);
1731 
1732 	nvme_tcp_poll_group_free_stats(tgroup, tgroup_stat);
1733 	rc = nvme_tcp_poll_group_destroy(tgroup);
1734 	CU_ASSERT(rc == 0);
1735 
1736 	MOCK_CLEAR(spdk_sock_group_create);
1737 }
1738 
1739 static void
1740 test_nvme_tcp_ctrlr_construct(void)
1741 {
1742 	struct nvme_tcp_qpair *tqpair = NULL;
1743 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1744 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1745 	struct spdk_nvme_transport_id trid = {
1746 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1747 		.priority = 1,
1748 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1749 		.traddr = "192.168.1.78",
1750 		.trsvcid = "23",
1751 	};
1752 	struct spdk_nvme_ctrlr_opts opts = {
1753 		.admin_queue_size = 2,
1754 		.src_addr = "192.168.1.77",
1755 		.src_svcid = "23",
1756 	};
1757 
1758 	/* Transmit ACK timeout exceeds the maximum, expected to pass with the value clamped to the maximum */
1759 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1760 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDEADBEEF);
1761 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1762 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1763 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1764 
1765 	CU_ASSERT(ctrlr != NULL);
1766 	CU_ASSERT(tctrlr != NULL);
1767 	CU_ASSERT(tqpair != NULL);
1768 	CU_ASSERT(ctrlr->opts.transport_ack_timeout == NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
1769 	CU_ASSERT(memcmp(&ctrlr->trid, &trid, sizeof(struct spdk_nvme_transport_id)) == 0);
1770 	CU_ASSERT(tqpair->num_entries == 1);
1771 	CU_ASSERT(TAILQ_EMPTY(&tqpair->send_queue));
1772 	CU_ASSERT(TAILQ_EMPTY(&tqpair->outstanding_reqs));
1773 	CU_ASSERT(!TAILQ_EMPTY(&tqpair->free_reqs));
1774 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs) == &tqpair->tcp_reqs[0]);
1775 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->cid == 0);
1776 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->tqpair == tqpair);
1777 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->pdu == &tqpair->send_pdus[0]);
1778 	CU_ASSERT(tqpair->send_pdu == &tqpair->send_pdus[1]);
1779 	CU_ASSERT(tqpair->recv_pdu == &tqpair->send_pdus[2]);
1780 
1781 	free(tqpair->tcp_reqs);
1782 	spdk_free(tqpair->send_pdus);
1783 	free(tqpair);
1784 	free(tctrlr);
1785 
1786 	/* Admin queue size below the required minimum, admin qpair creation is expected to fail */
1787 	opts.admin_queue_size = 1;
1788 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1789 	CU_ASSERT(ctrlr == NULL);
1790 
1791 	/* Unhandled ADRFAM, admin qpair creation is expected to fail */
1792 	opts.admin_queue_size = 2;
1793 	trid.adrfam = SPDK_NVMF_ADRFAM_INTRA_HOST;
1794 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1795 	CU_ASSERT(ctrlr == NULL);
1796 
1797 	/* Socket connection error, admin qpair creation is expected to fail */
1798 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1799 	MOCK_SET(spdk_sock_connect_ext, NULL);
1800 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1801 	CU_ASSERT(ctrlr == NULL);
1802 
1803 	MOCK_CLEAR(spdk_sock_connect_ext);
1804 }
1805 
1806 static void
1807 test_nvme_tcp_qpair_submit_request(void)
1808 {
1809 	int rc = 0;
1810 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1811 	struct nvme_tcp_qpair *tqpair = NULL;
1812 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1813 	struct nvme_tcp_req *tcp_req = NULL;
1814 	struct nvme_request req = {};
1815 	struct nvme_tcp_ut_bdev_io bio = {};
1816 	struct spdk_nvme_tcp_stat stat = {};
1817 	struct spdk_nvme_transport_id trid = {
1818 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1819 		.priority = 1,
1820 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1821 		.traddr = "192.168.1.78",
1822 		.trsvcid = "23",
1823 	};
1824 	struct spdk_nvme_ctrlr_opts opts = {
1825 		.admin_queue_size = 2,
1826 		.src_addr = "192.168.1.77",
1827 		.src_svcid = "23",
1828 	};
1829 
1830 	/* Construct TCP Controller */
1831 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1832 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDCADBEEF);
1833 
1834 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1835 	CU_ASSERT(ctrlr != NULL);
1836 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1837 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1838 	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
1839 	CU_ASSERT(tctrlr != NULL);
1840 	CU_ASSERT(tqpair != NULL);
1841 	CU_ASSERT(tcp_req->pdu != NULL);
1842 	CU_ASSERT(tqpair->num_entries == 1);
1843 
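	/* Prepare a host-to-controller request with an SGL payload split across two iovecs */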
1844 	tqpair->stats = &stat;
1845 	req.qpair = &tqpair->qpair;
1846 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
1847 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
1848 
1849 	/* Request construction fails because max_sges is too small for the payload */
1850 	req.qpair->ctrlr->max_sges = 1;
1851 	req.payload_size = 2048;
1852 	req.payload_offset = 0;
1853 	bio.iovpos = 0;
1854 	bio.iovs[0].iov_len = 1024;
1855 	bio.iovs[1].iov_len = 1024;
1856 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
1857 	bio.iovs[1].iov_base = (void *)0xDFADBEEF;
1858 
1859 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1860 	CU_ASSERT(rc == -1);
1861 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->free_reqs));
1862 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_FREE);
1863 
1864 	/* Multiple SGL entries, expected to pass */
1865 	req.qpair->ctrlr->max_sges = 2;
1866 
1867 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1868 	CU_ASSERT(rc == 0);
1869 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_ACTIVE);
1870 	CU_ASSERT(NULL == TAILQ_FIRST(&tqpair->free_reqs));
1871 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->outstanding_reqs));
1872 	CU_ASSERT(tcp_req->expected_datao == 0);
1873 	CU_ASSERT(tcp_req->req == &req);
1874 	CU_ASSERT(tcp_req->r2tl_remain == 0);
1875 	CU_ASSERT(tcp_req->r2tl_remain_next == 0);
1876 	CU_ASSERT(tcp_req->active_r2ts == 0);
1877 	CU_ASSERT(tcp_req->iovcnt == 2);
1878 	CU_ASSERT(tcp_req->ordering.raw == 0);
1879 	CU_ASSERT(req.cmd.cid == tcp_req->cid);
1880 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
1881 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
1882 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
1883 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
1884 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
1885 	CU_ASSERT(tcp_req->in_capsule_data == true);
1886 	CU_ASSERT(tcp_req->iov[0].iov_len == bio.iovs[0].iov_len);
1887 	CU_ASSERT(tcp_req->iov[1].iov_len == bio.iovs[1].iov_len);
1888 	CU_ASSERT(tcp_req->iov[0].iov_base == bio.iovs[0].iov_base);
1889 	CU_ASSERT(tcp_req->iov[1].iov_base == bio.iovs[1].iov_base);
1890 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
1891 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) == 0);
1892 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_DDGSTF) == 0);
1893 	CU_ASSERT(tcp_req->datao == 0);
1894 	CU_ASSERT(tcp_req->pdu->data_len == req.payload_size);
1895 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdo == sizeof(struct spdk_nvme_tcp_cmd));
1896 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.plen == sizeof(struct spdk_nvme_tcp_cmd) +
1897 		  req.payload_size);
1898 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_base == (void *)0xDEADBEEF);
1899 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_len == 1024);
1900 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_base == (void *)0xDFADBEEF);
1901 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_len == 1024);
1902 	CU_ASSERT(tcp_req->pdu->data_iovcnt == 2);
1903 
1904 	/* Request resource limit reached, expected to return -EAGAIN */
1905 	memset(&req, 0x00, sizeof(struct nvme_request));
1906 	CU_ASSERT(tqpair->stats->queued_requests == 0);
1907 
1908 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1909 	CU_ASSERT(rc == -EAGAIN);
1910 	CU_ASSERT(tqpair->stats->queued_requests == 1);
1911 
1912 	MOCK_CLEAR(spdk_sock_connect_ext);
1913 	free(tqpair->tcp_reqs);
1914 	spdk_free(tqpair->send_pdus);
1915 	free(tqpair);
1916 	free(tctrlr);
1917 }
1918 
1919 int
1920 main(int argc, char **argv)
1921 {
1922 	CU_pSuite	suite = NULL;
1923 	unsigned int	num_failures;
1924 
1925 	CU_initialize_registry();
1926 
1927 	suite = CU_add_suite("nvme_tcp", NULL, NULL);
1928 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
1929 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
1930 	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
1931 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
1932 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
1933 	CU_ADD_TEST(suite, test_nvme_tcp_req_complete_safe);
1934 	CU_ADD_TEST(suite, test_nvme_tcp_req_get);
1935 	CU_ADD_TEST(suite, test_nvme_tcp_req_init);
1936 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_capsule_cmd_send);
1937 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_write_pdu);
1938 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_set_recv_state);
1939 	CU_ADD_TEST(suite, test_nvme_tcp_alloc_reqs);
1940 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_send_h2c_term_req);
1941 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_ch_handle);
1942 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_connect_sock);
1943 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_icreq_send);
1944 	CU_ADD_TEST(suite, test_nvme_tcp_c2h_payload_handle);
1945 	CU_ADD_TEST(suite, test_nvme_tcp_icresp_handle);
1946 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_payload_handle);
1947 	CU_ADD_TEST(suite, test_nvme_tcp_capsule_resp_hdr_handle);
1948 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_connect_qpair);
1949 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_disconnect_qpair);
1950 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_create_io_qpair);
1951 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_delete_io_qpair);
1952 	CU_ADD_TEST(suite, test_nvme_tcp_poll_group_get_stats);
1953 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_construct);
1954 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_submit_request);
1955 
1956 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1957 	CU_cleanup_registry();
1958 	return num_failures;
1959 }
1960