1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk/nvme.h"
9 
10 #include "spdk_internal/cunit.h"
11 
12 #include "common/lib/test_sock.c"
13 #include "nvme/nvme_internal.h"
14 #include "common/lib/nvme/common_stubs.h"
15 
16 /* nvme_transport_ctrlr_disconnect_qpair_done() stub is defined in common_stubs.h, but we need to
17  * override it here */
18 static void nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair);
19 #define nvme_transport_ctrlr_disconnect_qpair_done nvme_transport_ctrlr_disconnect_qpair_done_mocked
20 
21 #include "nvme/nvme_tcp.c"
22 
23 SPDK_LOG_REGISTER_COMPONENT(nvme)
24 
25 DEFINE_STUB(nvme_qpair_submit_request,
26 	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
27 
28 DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
29 		struct spdk_nvme_qpair *qpair), 0);
30 DEFINE_STUB(spdk_sock_get_optimal_sock_group,
31 	    int,
32 	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
33 	    0);
34 
35 DEFINE_STUB(spdk_sock_group_get_ctx,
36 	    void *,
37 	    (struct spdk_sock_group *group),
38 	    NULL);
39 
40 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
41 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
42 
43 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
44 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
45 
46 DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
47 		struct spdk_nvme_cmd *cmd));
48 
49 DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
50 		struct spdk_nvme_cpl *cpl));
51 DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *key, void *buf, int len), 0);
52 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *key), NULL);
53 
54 DEFINE_STUB(spdk_memory_domain_get_system_domain, struct spdk_memory_domain *, (void), NULL);
55 DEFINE_STUB(spdk_memory_domain_translate_data, int,
56 	    (struct spdk_memory_domain *src_domain, void *src_domain_ctx,
57 	     struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
58 	     void *addr, size_t len, struct spdk_memory_domain_translation_result *result), 0);
59 DEFINE_STUB_V(spdk_memory_domain_invalidate_data, (struct spdk_memory_domain *domain,
60 		void *domain_ctx, struct iovec *iov, uint32_t iovcnt));
61 
62 static void
63 nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair)
64 {
65 	qpair->state = NVME_QPAIR_DISCONNECTED;
66 }
67 
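/* nvme_tcp_pdu_set_data_buf() maps an (offset, length) window of the source
 * SGL onto pdu->data_iov.  The cases below cover a window inside a single
 * entry, windows split across entries, and a window spanning all
 * NVME_TCP_MAX_SGL_DESCRIPTORS entries.
 */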
68 static void
69 test_nvme_tcp_pdu_set_data_buf(void)
70 {
71 	struct nvme_tcp_pdu pdu = {};
72 	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
73 	uint32_t data_len;
74 	uint64_t i;
75 
76 	/* 1st case: input is a single SGL entry. */
77 	iov[0].iov_base = (void *)0xDEADBEEF;
78 	iov[0].iov_len = 4096;
79 
80 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
81 
82 	CU_ASSERT(pdu.data_iovcnt == 1);
83 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
84 	CU_ASSERT(pdu.data_iov[0].iov_len == 512);
85 
86 	/* 2nd case: simulate a split across multiple SGL entries. */
87 	iov[0].iov_base = (void *)0xDEADBEEF;
88 	iov[0].iov_len = 4096;
89 	iov[1].iov_base = (void *)0xFEEDBEEF;
90 	iov[1].iov_len = 512 * 7;
91 	iov[2].iov_base = (void *)0xF00DF00D;
92 	iov[2].iov_len = 4096 * 2;
93 
94 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
95 
96 	CU_ASSERT(pdu.data_iovcnt == 1);
97 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
98 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
99 
100 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
101 
102 	CU_ASSERT(pdu.data_iovcnt == 2);
103 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
104 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
105 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
106 	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
107 
108 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
109 
110 	CU_ASSERT(pdu.data_iovcnt == 2);
111 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
112 	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
113 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
114 	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
115 
116 	/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
117 	 * entries.
118 	 */
119 	data_len = 0;
120 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
121 		iov[i].iov_base = (void *)(0xDEADBEEF + i);
122 		iov[i].iov_len = 512 * (i + 1);
123 		data_len += 512 * (i + 1);
124 	}
125 
126 	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
127 
128 	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
129 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
130 		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
131 		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
132 	}
133 }
134 
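/* nvme_tcp_build_iovs() flattens a PDU into iovecs for the socket write: the
 * header (plus header digest), each data_iov entry and the data digest, with
 * mapped_length reporting the total number of bytes covered.
 */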
135 static void
136 test_nvme_tcp_build_iovs(void)
137 {
138 	const uintptr_t pdu_iov_len = 4096;
139 	struct nvme_tcp_pdu pdu = {};
140 	struct iovec iovs[5] = {};
141 	uint32_t mapped_length = 0;
142 	int rc;
143 
144 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
145 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
146 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
147 			      SPDK_NVME_TCP_DIGEST_LEN;
148 	pdu.data_len = pdu_iov_len * 2;
149 	pdu.padding_len = 0;
150 
151 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
152 	pdu.data_iov[0].iov_len = pdu_iov_len;
153 	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
154 	pdu.data_iov[1].iov_len = pdu_iov_len;
155 	pdu.data_iovcnt = 2;
156 
157 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
158 	CU_ASSERT(rc == 4);
159 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
160 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
161 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
162 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
163 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
164 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
165 	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
166 	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
167 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
168 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
169 
170 	/* Add a new data_iov entry, update pdu iov count and data length */
171 	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
172 	pdu.data_iov[2].iov_len = 123;
173 	pdu.data_iovcnt = 3;
174 	pdu.data_len += 123;
175 	pdu.hdr.common.plen += 123;
176 
177 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
178 	CU_ASSERT(rc == 5);
179 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
180 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
181 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
182 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
183 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
184 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
185 	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
186 	CU_ASSERT(iovs[3].iov_len == 123);
187 	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
188 	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
189 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
190 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
191 }
192 
193 struct nvme_tcp_ut_bdev_io {
194 	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
195 	int iovpos;
196 };
197 
198 /* Essentially simplified versions of bdev_nvme_reset_sgl() and bdev_nvme_next_sge() */
199 static void
200 nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
201 {
202 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
203 	struct iovec *iov;
204 
205 	for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
206 		iov = &bio->iovs[bio->iovpos];
207 		/* Offset must be aligned with the start of any SGL entry */
208 		if (offset == 0) {
209 			break;
210 		}
211 
212 		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
213 		offset -= iov->iov_len;
214 	}
215 
216 	SPDK_CU_ASSERT_FATAL(offset == 0);
217 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
218 }
219 
220 static int
221 nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
222 {
223 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
224 	struct iovec *iov;
225 
226 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
227 
228 	iov = &bio->iovs[bio->iovpos];
229 
230 	*address = iov->iov_base;
231 	*length = iov->iov_len;
232 	bio->iovpos++;
233 
234 	return 0;
235 }
236 
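/* Exercise nvme_tcp_build_sgl_request() with a single-entry SGL, a multi-entry
 * SGL, and a payload too large to fit in NVME_TCP_MAX_SGL_DESCRIPTORS entries
 * (which must fail).
 */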
237 static void
238 test_nvme_tcp_build_sgl_request(void)
239 {
240 	struct nvme_tcp_qpair tqpair;
241 	struct spdk_nvme_ctrlr ctrlr = {{0}};
242 	struct nvme_tcp_req tcp_req = {0};
243 	struct nvme_request req = {{0}};
244 	struct nvme_tcp_ut_bdev_io bio;
245 	uint64_t i;
246 	int rc;
247 
248 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
249 	tqpair.qpair.ctrlr = &ctrlr;
250 	tcp_req.req = &req;
251 
252 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
253 	req.qpair = &tqpair.qpair;
254 
255 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
256 		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
257 		bio.iovs[i].iov_len = 0;
258 	}
259 
260 	/* Test case 1: Single SGL. Expected: PASS */
261 	bio.iovpos = 0;
262 	req.payload_offset = 0;
263 	req.payload_size = 0x1000;
264 	bio.iovs[0].iov_len = 0x1000;
265 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
266 	SPDK_CU_ASSERT_FATAL(rc == 0);
267 	CU_ASSERT(bio.iovpos == 1);
268 	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
269 	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
270 	CU_ASSERT(tcp_req.iovcnt == 1);
271 
272 	/* Test case 2: Multiple SGL. Expected: PASS */
273 	bio.iovpos = 0;
274 	req.payload_offset = 0;
275 	req.payload_size = 0x4000;
276 	for (i = 0; i < 4; i++) {
277 		bio.iovs[i].iov_len = 0x1000;
278 	}
279 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
280 	SPDK_CU_ASSERT_FATAL(rc == 0);
281 	CU_ASSERT(bio.iovpos == 4);
282 	CU_ASSERT(tcp_req.iovcnt == 4);
283 	for (i = 0; i < 4; i++) {
284 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
285 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
286 	}
287 
288 	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
289 	bio.iovpos = 0;
290 	req.payload_offset = 0;
291 	req.payload_size = 0x17000;
292 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
293 		bio.iovs[i].iov_len = 0x1000;
294 	}
295 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
296 	SPDK_CU_ASSERT_FATAL(rc != 0);
297 	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
298 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
299 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
300 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
301 	}
302 }
303 
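/* With a DIF context attached, the buffer uses an extended LBA layout of
 * 512 data bytes followed by 8 metadata bytes (520-byte blocks).  The offset
 * and length passed to nvme_tcp_pdu_set_data_buf() are counted in data bytes,
 * while the resulting data_iov entries are counted in buffer bytes.
 */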
304 static void
305 test_nvme_tcp_pdu_set_data_buf_with_md(void)
306 {
307 	struct nvme_tcp_pdu pdu = {};
308 	struct iovec iovs[7] = {};
309 	struct spdk_dif_ctx dif_ctx = {};
310 	int rc;
311 	struct spdk_dif_ctx_init_ext_opts dif_opts;
312 
313 	pdu.dif_ctx = &dif_ctx;
314 
315 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
316 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
317 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
318 			       0, 0, 0, 0, 0, &dif_opts);
319 	CU_ASSERT(rc == 0);
320 
321 	/* Single iovec case */
322 	iovs[0].iov_base = (void *)0xDEADBEEF;
323 	iovs[0].iov_len = 2080;
324 
325 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
326 
327 	CU_ASSERT(dif_ctx.data_offset == 0);
328 	CU_ASSERT(pdu.data_len == 500);
329 	CU_ASSERT(pdu.data_iovcnt == 1);
330 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
331 	CU_ASSERT(pdu.data_iov[0].iov_len == 500);
332 
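	/* Data bytes 500-1499 start in block 0 and end in block 2, so the buffer
	 * window starts at offset 500 and crosses two 8-byte metadata regions:
	 * 1000 + 2 * 8 = 1016 buffer bytes.
	 */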
333 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
334 
335 	CU_ASSERT(dif_ctx.data_offset == 500);
336 	CU_ASSERT(pdu.data_len == 1000);
337 	CU_ASSERT(pdu.data_iovcnt == 1);
338 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
339 	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
340 
341 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
342 
343 	CU_ASSERT(dif_ctx.data_offset == 1500);
344 	CU_ASSERT(pdu.data_len == 548);
345 	CU_ASSERT(pdu.data_iovcnt == 1);
346 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
347 	CU_ASSERT(pdu.data_iov[0].iov_len == 564);
348 
349 	/* Multiple iovecs case */
350 	iovs[0].iov_base = (void *)0xDEADBEEF;
351 	iovs[0].iov_len = 256;
352 	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
353 	iovs[1].iov_len = 256 + 1;
354 	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
355 	iovs[2].iov_len = 4;
356 	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
357 	iovs[3].iov_len = 3 + 123;
358 	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
359 	iovs[4].iov_len = 389 + 6;
360 	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
361 	iovs[5].iov_len = 2 + 512 + 8 + 432;
362 	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
363 	iovs[6].iov_len = 80 + 8;
364 
365 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
366 
367 	CU_ASSERT(dif_ctx.data_offset == 0);
368 	CU_ASSERT(pdu.data_len == 500);
369 	CU_ASSERT(pdu.data_iovcnt == 2);
370 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
371 	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
372 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
373 	CU_ASSERT(pdu.data_iov[1].iov_len == 244);
374 
375 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
376 
377 	CU_ASSERT(dif_ctx.data_offset == 500);
378 	CU_ASSERT(pdu.data_len == 1000);
379 	CU_ASSERT(pdu.data_iovcnt == 5);
380 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
381 	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
382 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
383 	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
384 	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
385 	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
386 	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
387 	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
388 	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
389 	CU_ASSERT(pdu.data_iov[4].iov_len == 478);
390 
391 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
392 
393 	CU_ASSERT(dif_ctx.data_offset == 1500);
394 	CU_ASSERT(pdu.data_len == 548);
395 	CU_ASSERT(pdu.data_iovcnt == 2);
396 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
397 	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
398 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
399 	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
400 }
401 
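/* With DIF enabled, the single (512 + 8) * 8 byte extended-LBA buffer is split
 * into eight 512-byte data iovecs that skip the 8 metadata bytes after each
 * block, followed by the data digest.
 */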
402 static void
403 test_nvme_tcp_build_iovs_with_md(void)
404 {
405 	struct nvme_tcp_pdu pdu = {};
406 	struct iovec iovs[11] = {};
407 	struct spdk_dif_ctx dif_ctx = {};
408 	uint32_t mapped_length = 0;
409 	int rc;
410 	struct spdk_dif_ctx_init_ext_opts dif_opts;
411 
412 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
413 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
414 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
415 			       0, 0, 0, 0, 0, &dif_opts);
416 	CU_ASSERT(rc == 0);
417 
418 	pdu.dif_ctx = &dif_ctx;
419 
420 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
421 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
422 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
423 			      SPDK_NVME_TCP_DIGEST_LEN;
424 	pdu.data_len = 512 * 8;
425 	pdu.padding_len = 0;
426 
427 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
428 	pdu.data_iov[0].iov_len = (512 + 8) * 8;
429 	pdu.data_iovcnt = 1;
430 
431 	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
432 	CU_ASSERT(rc == 10);
433 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
434 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
435 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
436 	CU_ASSERT(iovs[1].iov_len == 512);
437 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
438 	CU_ASSERT(iovs[2].iov_len == 512);
439 	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
440 	CU_ASSERT(iovs[3].iov_len == 512);
441 	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
442 	CU_ASSERT(iovs[4].iov_len == 512);
443 	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
444 	CU_ASSERT(iovs[5].iov_len == 512);
445 	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
446 	CU_ASSERT(iovs[6].iov_len == 512);
447 	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
448 	CU_ASSERT(iovs[7].iov_len == 512);
449 	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
450 	CU_ASSERT(iovs[8].iov_len == 512);
451 	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
452 	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
453 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
454 		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
455 }
456 
457 /* Request completion callback stub, nothing to do */
458 static void
459 ut_nvme_complete_request(void *arg, const struct spdk_nvme_cpl *cpl)
460 {
461 	return;
462 }
463 
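/* nvme_tcp_req_complete_safe() completes a request only once both send_ack and
 * data_recv are set; when called outside of a completion context it also sets
 * tqpair->async_complete.
 */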
464 static void
465 test_nvme_tcp_req_complete_safe(void)
466 {
467 	bool rc;
468 	struct nvme_tcp_req	tcp_req = {0};
469 	struct nvme_request	req = {{0}};
470 	struct nvme_tcp_qpair	tqpair = {{0}};
471 
472 	tcp_req.req = &req;
473 	tcp_req.req->qpair = &tqpair.qpair;
474 	tcp_req.req->cb_fn = ut_nvme_complete_request;
475 	tcp_req.tqpair = &tqpair;
476 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
477 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
478 	tqpair.qpair.num_outstanding_reqs = 1;
479 
480 	/* Test case 1: send operation and transfer completed. Expect: PASS */
481 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
482 	tcp_req.ordering.bits.send_ack = 1;
483 	tcp_req.ordering.bits.data_recv = 1;
484 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
485 
486 	rc = nvme_tcp_req_complete_safe(&tcp_req);
487 	CU_ASSERT(rc == true);
488 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
489 
490 	/* Test case 2: send operation not completed. Expect: FAIL */
491 	tcp_req.ordering.raw = 0;
492 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
493 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
494 	tqpair.qpair.num_outstanding_reqs = 1;
495 
496 	rc = nvme_tcp_req_complete_safe(&tcp_req);
497 	SPDK_CU_ASSERT_FATAL(rc != true);
498 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
499 	TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
500 
501 	/* Test case 3: in completion context. Expect: PASS */
502 	tqpair.qpair.in_completion_context = 1;
503 	tqpair.async_complete = 0;
504 	tcp_req.ordering.bits.send_ack = 1;
505 	tcp_req.ordering.bits.data_recv = 1;
506 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
507 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
508 	tqpair.qpair.num_outstanding_reqs = 1;
509 
510 	rc = nvme_tcp_req_complete_safe(&tcp_req);
511 	CU_ASSERT(rc == true);
512 	CU_ASSERT(tcp_req.tqpair->async_complete == 0);
513 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
514 
515 	/* Test case 4: in async complete. Expect: PASS */
516 	tqpair.qpair.in_completion_context = 0;
517 	tcp_req.ordering.bits.send_ack = 1;
518 	tcp_req.ordering.bits.data_recv = 1;
519 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
520 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
521 	tqpair.qpair.num_outstanding_reqs = 1;
522 
523 	rc = nvme_tcp_req_complete_safe(&tcp_req);
524 	CU_ASSERT(rc == true);
525 	CU_ASSERT(tcp_req.tqpair->async_complete);
526 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
527 }
528 
529 static void
530 test_nvme_tcp_req_init(void)
531 {
532 	struct nvme_tcp_qpair tqpair = {};
533 	struct nvme_request req = {};
534 	struct nvme_tcp_req tcp_req = {0};
535 	struct spdk_nvme_ctrlr ctrlr = {{0}};
536 	struct nvme_tcp_ut_bdev_io bio = {};
537 	int rc;
538 
539 	tqpair.qpair.ctrlr = &ctrlr;
540 	req.qpair = &tqpair.qpair;
541 
542 	tcp_req.cid = 1;
543 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
544 	req.payload_offset = 0;
545 	req.payload_size = 4096;
546 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
547 	ctrlr.ioccsz_bytes = 1024;
548 	bio.iovpos = 0;
549 	bio.iovs[0].iov_len = 8192;
550 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
551 
552 	/* Test case1: payload type SGL. Expect: PASS */
553 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
554 	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
555 
556 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
557 	CU_ASSERT(rc == 0);
558 	CU_ASSERT(tcp_req.req == &req);
559 	CU_ASSERT(tcp_req.in_capsule_data == true);
560 	CU_ASSERT(tcp_req.iovcnt == 1);
561 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
562 	CU_ASSERT(tcp_req.iov[0].iov_base == bio.iovs[0].iov_base);
563 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
564 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
565 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
566 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
567 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
568 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
569 
570 	/* Test case2: payload type CONTIG. Expect: PASS */
571 	memset(&req.cmd, 0, sizeof(req.cmd));
572 	memset(&tcp_req, 0, sizeof(tcp_req));
573 	tcp_req.cid = 1;
574 	req.payload = NVME_PAYLOAD_CONTIG(&bio, NULL);
575 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
576 
577 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
578 	CU_ASSERT(rc == 0);
579 	CU_ASSERT(tcp_req.req == &req);
580 	CU_ASSERT(tcp_req.in_capsule_data == true);
581 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
582 	CU_ASSERT(tcp_req.iov[0].iov_base == &bio);
583 	CU_ASSERT(tcp_req.iovcnt == 1);
584 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
585 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
586 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
587 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
588 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
589 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
590 
591 }
592 
593 static void
594 test_nvme_tcp_req_get(void)
595 {
596 	struct nvme_tcp_req tcp_req = {0};
597 	struct nvme_tcp_qpair tqpair = {};
598 	struct nvme_tcp_pdu send_pdu = {};
599 
600 	tcp_req.pdu = &send_pdu;
601 	tcp_req.state = NVME_TCP_REQ_FREE;
602 
603 	TAILQ_INIT(&tqpair.free_reqs);
604 	TAILQ_INIT(&tqpair.outstanding_reqs);
605 	TAILQ_INSERT_HEAD(&tqpair.free_reqs, &tcp_req, link);
606 
607 	CU_ASSERT(nvme_tcp_req_get(&tqpair) == &tcp_req);
608 	CU_ASSERT(tcp_req.state == NVME_TCP_REQ_ACTIVE);
609 	CU_ASSERT(tcp_req.datao == 0);
610 	CU_ASSERT(tcp_req.req == NULL);
611 	CU_ASSERT(tcp_req.in_capsule_data == false);
612 	CU_ASSERT(tcp_req.r2tl_remain == 0);
613 	CU_ASSERT(tcp_req.iovcnt == 0);
614 	CU_ASSERT(tcp_req.ordering.raw == 0);
615 	/* outstanding_reqs should still be empty - caller is responsible
616 	 * for putting it on the TAILQ after any other initialization is
617 	 * completed.
618 	 */
619 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
620 	CU_ASSERT(TAILQ_EMPTY(&tqpair.free_reqs));
621 
622 	/* No tcp request available, expect fail */
623 	SPDK_CU_ASSERT_FATAL(nvme_tcp_req_get(&tqpair) == NULL);
624 }
625 
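/* The test expects pdo to equal the header length plus the header digest when
 * enabled, padded up to (cpda + 1) * 4 when a CPDA is negotiated, and plen to
 * equal pdo plus the payload and the data digest when enabled.
 */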
626 static void
627 test_nvme_tcp_qpair_capsule_cmd_send(void)
628 {
629 	struct nvme_tcp_qpair tqpair = {};
630 	struct spdk_nvme_tcp_stat stats = {};
631 	struct nvme_tcp_req tcp_req = {};
632 	struct nvme_tcp_pdu pdu = {};
633 	struct nvme_request req = {};
634 	char iov_base0[4096];
635 	char iov_base1[4096];
636 	uint32_t plen;
637 	uint8_t pdo;
638 
639 	memset(iov_base0, 0xFF, 4096);
640 	memset(iov_base1, 0xFF, 4096);
641 	tcp_req.req = &req;
642 	tcp_req.pdu = &pdu;
643 	TAILQ_INIT(&tqpair.send_queue);
644 	tqpair.stats = &stats;
645 
646 	tcp_req.iov[0].iov_base = (void *)iov_base0;
647 	tcp_req.iov[0].iov_len = 4096;
648 	tcp_req.iov[1].iov_base = (void *)iov_base1;
649 	tcp_req.iov[1].iov_len = 4096;
650 	tcp_req.iovcnt = 2;
651 	tcp_req.req->payload_size = 8192;
652 	tcp_req.in_capsule_data = true;
653 	tqpair.cpda = NVME_TCP_HPDA_DEFAULT;
654 
655 	/* Test case 1: host hdgst and ddgst enable. Expect: PASS */
656 	tqpair.flags.host_hdgst_enable = 1;
657 	tqpair.flags.host_ddgst_enable = 1;
658 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd) +
659 		     SPDK_NVME_TCP_DIGEST_LEN;
660 	plen += tcp_req.req->payload_size;
661 	plen += SPDK_NVME_TCP_DIGEST_LEN;
662 
663 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
664 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
665 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
666 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
667 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
668 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
669 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
670 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
671 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
672 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
673 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
674 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
675 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
676 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
677 
678 	/* Test case 2: host hdgst and ddgst disable. Expect: PASS */
679 	memset(&pdu, 0, sizeof(pdu));
680 	tqpair.flags.host_hdgst_enable = 0;
681 	tqpair.flags.host_ddgst_enable = 0;
682 
683 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd);
684 	plen += tcp_req.req->payload_size;
685 
686 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
687 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
688 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags == 0);
689 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
690 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
691 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
692 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
693 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
694 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
695 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
696 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
697 
698 	/* Test case 3: padding available. Expect: PASS */
699 	memset(&pdu, 0, sizeof(pdu));
700 	tqpair.flags.host_hdgst_enable = 1;
701 	tqpair.flags.host_ddgst_enable = 1;
702 	tqpair.cpda = SPDK_NVME_TCP_CPDA_MAX;
703 
704 	pdo = plen = (SPDK_NVME_TCP_CPDA_MAX + 1) << 2;
705 	plen += tcp_req.req->payload_size;
706 	plen += SPDK_NVME_TCP_DIGEST_LEN;
707 
708 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
709 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
710 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
711 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
712 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
713 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
714 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
715 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
716 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
717 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
718 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
719 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
720 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
721 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
722 }
723 
724 /* PDU transfer completion callback stub, nothing to do */
725 static void
726 ut_nvme_tcp_qpair_xfer_complete_cb(void *cb_arg)
727 {
728 	return;
729 }
730 
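/* nvme_tcp_qpair_write_pdu() fills in the header and data digests when they
 * are enabled, builds the socket request iovecs and queues the PDU on
 * tqpair->send_queue.
 */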
731 static void
732 test_nvme_tcp_qpair_write_pdu(void)
733 {
734 	struct nvme_tcp_qpair tqpair = {};
735 	struct spdk_nvme_tcp_stat stats = {};
736 	struct nvme_request req = {};
737 	struct nvme_tcp_req treq = { .req = &req };
738 	struct nvme_tcp_pdu pdu = { .req = &treq };
739 	void *cb_arg = (void *)0xDEADBEEF;
740 	char iov_base0[4096];
741 	char iov_base1[4096];
742 
743 	memset(iov_base0, 0xFF, 4096);
744 	memset(iov_base1, 0xFF, 4096);
745 	pdu.data_len = 4096 * 2;
746 	pdu.padding_len = 0;
747 	pdu.data_iov[0].iov_base = (void *)iov_base0;
748 	pdu.data_iov[0].iov_len = 4096;
749 	pdu.data_iov[1].iov_base = (void *)iov_base1;
750 	pdu.data_iov[1].iov_len = 4096;
751 	pdu.data_iovcnt = 2;
752 	TAILQ_INIT(&tqpair.send_queue);
753 
754 	/* Test case1: host hdgst and ddgst enable Expect: PASS */
755 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
756 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
757 
758 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
759 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
760 	pdu.hdr.common.plen = pdu.hdr.common.hlen +
761 			      SPDK_NVME_TCP_DIGEST_LEN * 2;
762 	pdu.hdr.common.plen += pdu.data_len;
763 	tqpair.flags.host_hdgst_enable = 1;
764 	tqpair.flags.host_ddgst_enable = 1;
765 	tqpair.stats = &stats;
766 
767 	nvme_tcp_qpair_write_pdu(&tqpair,
768 				 &pdu,
769 				 ut_nvme_tcp_qpair_xfer_complete_cb,
770 				 cb_arg);
771 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
772 	/* Check the crc data of header digest filled into raw */
773 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen]);
774 	CU_ASSERT(pdu.data_digest[0]);
775 	CU_ASSERT(pdu.sock_req.iovcnt == 4);
776 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
777 	CU_ASSERT(pdu.iov[0].iov_len == (sizeof(struct spdk_nvme_tcp_cmd) +
778 					 SPDK_NVME_TCP_DIGEST_LEN));
779 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
780 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
781 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
782 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
783 	CU_ASSERT(pdu.iov[3].iov_base == &pdu.data_digest);
784 	CU_ASSERT(pdu.iov[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
785 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
786 	CU_ASSERT(pdu.cb_arg == cb_arg);
787 	CU_ASSERT(pdu.qpair == &tqpair);
788 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
789 
790 	/* Test case2: host hdgst and ddgst disable Expect: PASS */
791 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
792 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
793 
794 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
795 	pdu.hdr.common.plen = pdu.hdr.common.hlen + pdu.data_len;
796 	tqpair.flags.host_hdgst_enable = 0;
797 	tqpair.flags.host_ddgst_enable = 0;
798 
799 	nvme_tcp_qpair_write_pdu(&tqpair,
800 				 &pdu,
801 				 ut_nvme_tcp_qpair_xfer_complete_cb,
802 				 cb_arg);
803 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
804 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen] == 0);
805 	CU_ASSERT(pdu.data_digest[0] == 0);
806 	CU_ASSERT(pdu.sock_req.iovcnt == 3);
807 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
808 	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd));
809 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
810 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
811 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
812 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
813 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
814 	CU_ASSERT(pdu.cb_arg == cb_arg);
815 	CU_ASSERT(pdu.qpair == &tqpair);
816 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
817 }
818 
819 static void
820 test_nvme_tcp_qpair_set_recv_state(void)
821 {
822 	struct nvme_tcp_qpair tqpair = {};
823 
824 	/* case1: The recv state of tqpair is the same as the state to be set */
825 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
826 	nvme_tcp_qpair_set_recv_state(&tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
827 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
828 
829 	/* Different state will be set accordingly */
830 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY;
831 	nvme_tcp_qpair_set_recv_state(&tqpair, 0xff);
832 	CU_ASSERT(tqpair.recv_state == 0xff);
833 }
834 
835 static void
836 test_nvme_tcp_alloc_reqs(void)
837 {
838 	struct nvme_tcp_qpair tqpair = {};
839 	int rc = 0;
840 
841 	/* case1: single entry. Expect: PASS */
842 	tqpair.num_entries = 1;
843 	rc = nvme_tcp_alloc_reqs(&tqpair);
844 	CU_ASSERT(rc == 0);
845 	CU_ASSERT(tqpair.tcp_reqs[0].cid == 0);
846 	CU_ASSERT(tqpair.tcp_reqs[0].tqpair == &tqpair);
847 	CU_ASSERT(tqpair.tcp_reqs[0].pdu == &tqpair.send_pdus[0]);
848 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
849 	free(tqpair.tcp_reqs);
850 	spdk_free(tqpair.send_pdus);
851 
852 	/* case2: multiple entries. Expect: PASS */
853 	tqpair.num_entries = 5;
854 	rc = nvme_tcp_alloc_reqs(&tqpair);
855 	CU_ASSERT(rc == 0);
856 	for (int i = 0; i < tqpair.num_entries; i++) {
857 		CU_ASSERT(tqpair.tcp_reqs[i].cid == i);
858 		CU_ASSERT(tqpair.tcp_reqs[i].tqpair == &tqpair);
859 		CU_ASSERT(tqpair.tcp_reqs[i].pdu == &tqpair.send_pdus[i]);
860 	}
861 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
862 
863 	/* case3: nvme_tcp_free_reqs releases the requests and PDUs. Expect: PASS */
864 	nvme_tcp_free_reqs(&tqpair);
865 	CU_ASSERT(tqpair.tcp_reqs == NULL);
866 	CU_ASSERT(tqpair.send_pdus == NULL);
867 }
868 
869 static void
870 test_nvme_tcp_qpair_send_h2c_term_req(void)
871 {
872 	struct nvme_tcp_qpair tqpair = {};
873 	struct spdk_nvme_tcp_stat stats = {};
874 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
875 	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
876 	uint32_t error_offset = 1;
877 
878 	tqpair.send_pdu = &send_pdu;
879 	tqpair.recv_pdu = &recv_pdu;
880 	tqpair.stats = &stats;
881 	TAILQ_INIT(&tqpair.send_queue);
882 	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
883 	pdu.hdr.common.hlen = 64;
884 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
885 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
886 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
887 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
888 		  pdu.hdr.common.hlen);
889 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
890 
891 	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
892 	pdu.hdr.common.hlen = 255;
893 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
894 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
895 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
896 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == (unsigned)
897 		  tqpair.send_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
898 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
899 }
900 
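/* nvme_tcp_pdu_ch_handle() validates the common PDU header (type, hlen, plen)
 * and sends an H2C termination request on any mismatch.
 */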
901 static void
902 test_nvme_tcp_pdu_ch_handle(void)
903 {
904 	struct nvme_tcp_qpair tqpair = {};
905 	struct spdk_nvme_tcp_stat stats = {};
906 	struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};
907 
908 	tqpair.send_pdu = &send_pdu;
909 	tqpair.recv_pdu = &recv_pdu;
910 	tqpair.stats = &stats;
911 	TAILQ_INIT(&tqpair.send_queue);
912 	/* case 1: Already received IC_RESP PDU. Expect: fail */
913 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
914 	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
915 	nvme_tcp_pdu_ch_handle(&tqpair);
916 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
917 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
918 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
919 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
920 
921 	/* case 2: Received PDU header length differs from the expected length. Expect: fail */
922 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
923 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
924 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
925 	tqpair.recv_pdu->hdr.common.hlen = 0;
926 	nvme_tcp_pdu_ch_handle(&tqpair);
927 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
928 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
929 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
930 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
931 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);
932 
933 	/* case 3: The TCP/IP tqpair connection is not negotiated. Expect: fail */
934 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
935 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
936 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
937 	tqpair.recv_pdu->hdr.common.hlen = 0;
938 	nvme_tcp_pdu_ch_handle(&tqpair);
939 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
940 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
941 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
942 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
943 
944 	/* case 4: Unexpected PDU type. Expect: fail */
945 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
946 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
947 	tqpair.recv_pdu->hdr.common.plen = 0;
948 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
949 	nvme_tcp_pdu_ch_handle(&tqpair);
950 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
951 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
952 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
953 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
954 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
955 
956 	/* case 5: plen error. Expect: fail */
957 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
958 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
959 	tqpair.recv_pdu->hdr.common.plen = 0;
960 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
961 	nvme_tcp_pdu_ch_handle(&tqpair);
962 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
963 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
964 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
965 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
966 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
967 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
968 
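	/* plen error for a CAPSULE_RESP PDU. Expect: fail */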
969 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
970 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
971 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
972 	tqpair.recv_pdu->hdr.common.plen = 0;
973 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
974 	nvme_tcp_pdu_ch_handle(&tqpair);
975 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
976 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
977 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
978 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
979 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
980 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
981 
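	/* plen error for a C2H_DATA PDU. Expect: fail */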
982 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
983 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
984 	tqpair.recv_pdu->hdr.common.plen = 0;
985 	tqpair.recv_pdu->hdr.common.pdo = 64;
986 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
987 	nvme_tcp_pdu_ch_handle(&tqpair);
988 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
989 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
990 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
991 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
992 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
993 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
994 
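	/* plen error for a C2H_TERM_REQ PDU. Expect: fail */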
995 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
996 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
997 	tqpair.recv_pdu->hdr.common.plen = 0;
998 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
999 	nvme_tcp_pdu_ch_handle(&tqpair);
1000 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1001 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1002 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1003 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1004 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1005 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1006 
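	/* plen error for an R2T PDU. Expect: fail */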
1007 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
1008 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1009 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1010 	tqpair.recv_pdu->hdr.common.plen = 0;
1011 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
1012 	nvme_tcp_pdu_ch_handle(&tqpair);
1013 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1014 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1015 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1016 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1017 		  (unsigned)sizeof(struct spdk_nvme_tcp_r2t_hdr));
1018 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1019 
1020 	/* case 6: Valid IC_RESP PDU. Expect: PASS */
1021 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1022 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
1023 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1024 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1025 	nvme_tcp_pdu_ch_handle(&tqpair);
1026 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1027 	CU_ASSERT(tqpair.recv_pdu->psh_len == tqpair.recv_pdu->hdr.common.hlen - sizeof(
1028 			  struct spdk_nvme_tcp_common_pdu_hdr));
1029 }
1030 
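/* Mock used by test_nvme_tcp_qpair_connect_sock(): verify the address, port
 * and socket options that nvme_tcp_qpair_connect_sock() passes down.
 */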
1031 DEFINE_RETURN_MOCK(spdk_sock_connect_ext, struct spdk_sock *);
1032 struct spdk_sock *
1033 spdk_sock_connect_ext(const char *ip, int port,
1034 		      const char *_impl_name, struct spdk_sock_opts *opts)
1035 {
1036 	HANDLE_RETURN_MOCK(spdk_sock_connect_ext);
1037 	CU_ASSERT(port == 23);
1038 	CU_ASSERT(opts->opts_size == sizeof(*opts));
1039 	CU_ASSERT(opts->priority == 1);
1040 	CU_ASSERT(opts->zcopy == true);
1041 	CU_ASSERT(!strcmp(ip, "192.168.1.78"));
1042 	return (struct spdk_sock *)0xDDADBEEF;
1043 }
1044 
1045 static void
1046 test_nvme_tcp_qpair_connect_sock(void)
1047 {
1048 	struct nvme_tcp_ctrlr tctrlr = {};
1049 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1050 	struct nvme_tcp_qpair tqpair = {};
1051 	int rc;
1052 
1053 	tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1054 	tqpair.qpair.id = 1;
1055 	tqpair.qpair.poll_group = (void *)0xDEADBEEF;
1056 	ctrlr->trid.priority = 1;
1057 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1058 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1059 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1060 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1061 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1062 
1063 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1064 	CU_ASSERT(rc == 0);
1065 
1066 	/* Unsupported family of the transport address */
1067 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IB;
1068 
1069 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1070 	SPDK_CU_ASSERT_FATAL(rc == -1);
1071 
1072 	/* Invalid dst_port: trsvcid parses to 2147483647 (INT_MAX) and is rejected */
1073 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1074 	memcpy(ctrlr->trid.trsvcid, "2147483647", sizeof("2147483647"));
1075 
1076 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1077 	SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
1078 
1079 	/* Parse invalid address */
1080 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1081 	memcpy(ctrlr->trid.traddr, "192.168.1.256", sizeof("192.168.1.256"));
1082 
1083 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1084 	SPDK_CU_ASSERT_FATAL(rc != 0);
1085 }
1086 
1087 static void
1088 test_nvme_tcp_qpair_icreq_send(void)
1089 {
1090 	struct nvme_tcp_qpair tqpair = {};
1091 	struct spdk_nvme_tcp_stat stats = {};
1092 	struct spdk_nvme_ctrlr ctrlr = {};
1093 	struct nvme_tcp_pdu pdu = {};
1094 	struct nvme_tcp_poll_group poll_group = {};
1095 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1096 	int rc;
1097 
1098 	tqpair.send_pdu = &pdu;
1099 	tqpair.qpair.ctrlr = &ctrlr;
1100 	tqpair.qpair.poll_group = &poll_group.group;
1101 	tqpair.stats = &stats;
1102 	ic_req = &pdu.hdr.ic_req;
1103 
1104 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1105 	tqpair.qpair.ctrlr->opts.header_digest = true;
1106 	tqpair.qpair.ctrlr->opts.data_digest = true;
1107 	TAILQ_INIT(&tqpair.send_queue);
1108 
1109 	rc = nvme_tcp_qpair_icreq_send(&tqpair);
1110 	CU_ASSERT(rc == 0);
1111 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1112 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1113 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1114 	CU_ASSERT(ic_req->pfv == 0);
1115 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1116 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1117 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1118 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1119 }
1120 
1121 static void
1122 test_nvme_tcp_c2h_payload_handle(void)
1123 {
1124 	struct nvme_tcp_qpair tqpair = {};
1125 	struct spdk_nvme_tcp_stat stats = {};
1126 	struct nvme_tcp_pdu pdu = {};
1127 	struct nvme_tcp_req tcp_req = {};
1128 	struct nvme_request	req = {};
1129 	struct nvme_tcp_pdu recv_pdu = {};
1130 	uint32_t reaped = 1;
1131 
1132 	tcp_req.req = &req;
1133 	tcp_req.req->qpair = &tqpair.qpair;
1134 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1135 	tcp_req.tqpair = &tqpair;
1136 	tcp_req.cid = 1;
1137 	tqpair.stats = &stats;
1138 
1139 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1140 
1141 	pdu.req = &tcp_req;
1142 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1143 					SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1144 	pdu.data_len = 1024;
1145 
1146 	tqpair.qpair.id = 1;
1147 	tqpair.recv_pdu = &recv_pdu;
1148 
1149 	/* case 1: nvme_tcp_c2h_data_payload_handle: tcp_req->datao != tcp_req->req->payload_size */
1150 	tcp_req.datao = 1024;
1151 	tcp_req.req->payload_size = 2048;
1152 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1153 	tcp_req.ordering.bits.send_ack = 1;
1154 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1155 	tcp_req.ordering.bits.data_recv = 0;
1156 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1157 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1158 	tqpair.qpair.num_outstanding_reqs = 1;
1159 
1160 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1161 
1162 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1163 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1164 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1165 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1166 	CU_ASSERT(reaped == 2);
1167 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1168 
1169 	/* case 2: nvme_tcp_c2h_data_payload_handle: tcp_req->datao == tcp_req->req->payload_size */
1170 	tcp_req.datao = 1024;
1171 	tcp_req.req->payload_size = 1024;
1172 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1173 	tcp_req.ordering.bits.send_ack = 1;
1174 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1175 	tcp_req.ordering.bits.data_recv = 0;
1176 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1177 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1178 	tqpair.qpair.num_outstanding_reqs = 1;
1179 
1180 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1181 
1182 	CU_ASSERT(tcp_req.rsp.status.p == 1);
1183 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1184 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1185 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1186 	CU_ASSERT(reaped == 3);
1187 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1188 
1189 	/* case 3: nvme_tcp_c2h_data_payload_handle: flag does not have SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS */
1190 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1191 	tcp_req.datao = 1024;
1192 	tcp_req.req->payload_size = 1024;
1193 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1194 	tcp_req.ordering.bits.send_ack = 1;
1195 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1196 	tcp_req.ordering.bits.data_recv = 0;
1197 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1198 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1199 	tqpair.qpair.num_outstanding_reqs = 1;
1200 
1201 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1202 
1203 	CU_ASSERT(reaped == 3);
1204 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1205 
1206 	/* case 4: nvme_tcp_c2h_term_req_payload_handle: recv_state is NVME_TCP_PDU_RECV_STATE_ERROR */
1207 	pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1208 	nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);
1209 
1210 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1211 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1212 }
1213 
1214 static void
1215 test_nvme_tcp_icresp_handle(void)
1216 {
1217 	struct nvme_tcp_qpair tqpair = {};
1218 	struct spdk_nvme_tcp_stat stats = {};
1219 	struct nvme_tcp_pdu pdu = {};
1220 	struct nvme_tcp_pdu send_pdu = {};
1221 	struct nvme_tcp_pdu recv_pdu = {};
1222 
1223 	tqpair.send_pdu = &send_pdu;
1224 	tqpair.recv_pdu = &recv_pdu;
1225 	tqpair.stats = &stats;
1226 	TAILQ_INIT(&tqpair.send_queue);
1227 
1228 	/* case 1: Received ICResp PFV differs from the expected value. */
1229 	pdu.hdr.ic_resp.pfv = 1;
1230 
1231 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1232 
1233 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1234 
1235 	/* case 2: Received ICResp maxh2cdata is less than NVME_TCP_PDU_H2C_MIN_DATA_SIZE. */
1236 	pdu.hdr.ic_resp.pfv = 0;
1237 	pdu.hdr.ic_resp.maxh2cdata = 2048;
1238 
1239 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1240 
1241 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1242 
1243 	/* case 3: Received ICResp cpda exceeds SPDK_NVME_TCP_CPDA_MAX. */
1244 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1245 	pdu.hdr.ic_resp.cpda = 64;
1246 
1247 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1248 
1249 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1250 
1251 	/* case 4: waiting icreq ack. */
1252 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1253 	pdu.hdr.ic_resp.cpda = 30;
1254 	pdu.hdr.ic_resp.dgst.bits.hdgst_enable = true;
1255 	pdu.hdr.ic_resp.dgst.bits.ddgst_enable = true;
1256 	tqpair.flags.icreq_send_ack = 0;
1257 
1258 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1259 
1260 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1261 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_INITIALIZING);
1262 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1263 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1264 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1265 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1266 
1267 	/* case 5: Expect: PASS. */
1268 	tqpair.flags.icreq_send_ack = 1;
1269 
1270 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1271 
1272 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1273 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND);
1274 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1275 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1276 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1277 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1278 }
1279 
1280 static void
1281 test_nvme_tcp_pdu_payload_handle(void)
1282 {
1283 	struct nvme_tcp_qpair	tqpair = {};
1284 	struct spdk_nvme_tcp_stat	stats = {};
1285 	struct nvme_tcp_pdu	recv_pdu = {};
1286 	struct nvme_tcp_req	tcp_req = {};
1287 	struct nvme_request	req = {};
1288 	uint32_t		reaped = 0;
1289 
1290 	tqpair.recv_pdu = &recv_pdu;
1291 	tcp_req.tqpair = &tqpair;
1292 	tcp_req.req = &req;
1293 	tcp_req.req->qpair = &tqpair.qpair;
1294 	tqpair.stats = &stats;
1295 
1296 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1297 	tqpair.qpair.id = 1;
1298 	recv_pdu.ddgst_enable = false;
1299 	recv_pdu.req = &tcp_req;
1300 	recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1301 					     SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1302 	recv_pdu.data_len = 1024;
1303 	tcp_req.ordering.bits.data_recv = 0;
1304 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1305 	tcp_req.cid = 1;
1306 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1307 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1308 	tqpair.qpair.num_outstanding_reqs = 1;
1309 
1310 	/* C2H_DATA */
1311 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
1312 	tcp_req.datao = 1024;
1313 	tcp_req.req->payload_size = 2048;
1314 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1315 	tcp_req.ordering.bits.send_ack = 1;
1316 
1317 	recv_pdu.req = &tcp_req;
1318 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1319 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1320 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1321 	CU_ASSERT(tcp_req.rsp.cid == 1);
1322 	CU_ASSERT(tcp_req.rsp.sqid == 1);
1323 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1324 	CU_ASSERT(reaped == 1);
1325 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1326 
1327 	/* TermResp */
1328 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1329 	recv_pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1330 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1331 
1332 	recv_pdu.req = &tcp_req;
1333 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1334 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1335 }
1336 
1337 static void
1338 test_nvme_tcp_capsule_resp_hdr_handle(void)
1339 {
1340 	struct nvme_tcp_qpair	tqpair = {};
1341 	struct spdk_nvme_ctrlr	ctrlr = {};
1342 	struct spdk_nvme_tcp_stat	stats = {};
1343 	struct nvme_request	req = {};
1344 	struct spdk_nvme_cpl	rccqe_tgt = {};
1345 	struct nvme_tcp_req	*tcp_req = NULL;
1346 	uint32_t		reaped = 0;
1347 	int			rc;
1348 
1349 	/* Initialize requests and pdus */
1350 	tqpair.num_entries = 1;
1351 	tqpair.stats = &stats;
1352 	req.qpair = &tqpair.qpair;
1353 	req.qpair->ctrlr = &ctrlr;
1354 	req.payload = NVME_PAYLOAD_CONTIG(NULL, NULL);
1355 
1356 	rc = nvme_tcp_alloc_reqs(&tqpair);
1357 	SPDK_CU_ASSERT_FATAL(rc == 0);
1358 	tcp_req = nvme_tcp_req_get(&tqpair);
1359 	SPDK_CU_ASSERT_FATAL(tcp_req != NULL);
1360 	rc = nvme_tcp_req_init(&tqpair, &req, tcp_req);
1361 	SPDK_CU_ASSERT_FATAL(rc == 0);
1362 	tcp_req->ordering.bits.send_ack = 1;
1363 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1364 	/* tqpair.recv_pdu will be reset after handling */
1365 	memset(&rccqe_tgt, 0xff, sizeof(rccqe_tgt));
1366 	rccqe_tgt.cid = 0;
1367 	memcpy(&tqpair.recv_pdu->hdr.capsule_resp.rccqe, &rccqe_tgt, sizeof(rccqe_tgt));
1368 	tqpair.qpair.num_outstanding_reqs = 1;
1369 
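	/* A capsule response whose cid matches the outstanding request should be copied into
	 * tcp_req->rsp and complete the request, removing it from outstanding_reqs. */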
1370 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1371 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1372 	CU_ASSERT(!memcmp(&tcp_req->rsp, &rccqe_tgt, sizeof(rccqe_tgt)));
1373 	CU_ASSERT(tcp_req->ordering.bits.data_recv == 1);
1374 	CU_ASSERT(reaped == 1);
1375 	CU_ASSERT(TAILQ_EMPTY(&tcp_req->tqpair->outstanding_reqs));
1376 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1377 
1378 	/* No tcp request matches the completion's cid, expect failure */
1379 	reaped = 0;
1380 	tqpair.recv_pdu->hdr.capsule_resp.rccqe.cid = 1;
1381 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1382 
1383 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1384 	CU_ASSERT(reaped == 0);
1385 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1386 	nvme_tcp_free_reqs(&tqpair);
1387 }
1388 
1389 static void
1390 test_nvme_tcp_ctrlr_connect_qpair(void)
1391 {
1392 	struct spdk_nvme_ctrlr ctrlr = {};
1393 	struct spdk_nvme_qpair *qpair;
1394 	struct nvme_tcp_qpair *tqpair;
1395 	struct nvme_tcp_pdu pdu = {};
1396 	struct nvme_tcp_pdu recv_pdu = {};
1397 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1398 	int rc;
1399 
1400 	tqpair = calloc(1, sizeof(*tqpair));
1401 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1402 	tqpair->recv_pdu = &recv_pdu;
1403 	qpair = &tqpair->qpair;
1404 	tqpair->sock = (struct spdk_sock *)0xDEADBEEF;
1405 	tqpair->send_pdu = &pdu;
1406 	tqpair->qpair.ctrlr = &ctrlr;
1407 	tqpair->qpair.state = NVME_QPAIR_CONNECTING;
1408 	tqpair->num_entries = 128;
1409 	ic_req = &pdu.hdr.ic_req;
1410 
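	/* Pre-stage a valid ICResp in recv_pdu so that driving process_completions() below
	 * can finish the initialization handshake. */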
1411 	tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1412 	tqpair->recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1413 	tqpair->recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1414 	tqpair->recv_pdu->ch_valid_bytes = sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1415 	tqpair->recv_pdu->psh_valid_bytes = tqpair->recv_pdu->hdr.common.hlen -
1416 					    sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1417 	tqpair->recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
1418 	tqpair->recv_pdu->hdr.ic_resp.cpda = 1;
1419 	tqpair->flags.icreq_send_ack = 1;
1420 	tqpair->qpair.ctrlr->opts.header_digest = true;
1421 	tqpair->qpair.ctrlr->opts.data_digest = true;
1422 	TAILQ_INIT(&tqpair->send_queue);
1423 
1424 	rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
1425 	CU_ASSERT(rc == 0);
1426 
1427 	/* Skip the NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY state and assume the icresp
1428 	 * has already been received */
1429 	tqpair->recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1430 
1431 	while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
1432 		rc = nvme_tcp_qpair_process_completions(qpair, 0);
1433 		CU_ASSERT(rc >= 0);
1434 	}
1435 
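	/* Once the qpair reaches the RUNNING state, the icreq that was sent should have been
	 * built with the default parameters and with both digests enabled, since they were
	 * requested through the controller opts. */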
1436 	CU_ASSERT(tqpair->maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
1437 	CU_ASSERT(tqpair->state == NVME_TCP_QPAIR_STATE_RUNNING);
1438 	CU_ASSERT(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1439 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1440 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1441 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1442 	CU_ASSERT(ic_req->pfv == 0);
1443 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1444 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1445 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1446 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1447 
1448 	nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1449 }
1450 
1451 static void
1452 ut_disconnect_qpair_req_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
1453 {
1454 	CU_ASSERT_EQUAL(cpl->status.sc, SPDK_NVME_SC_ABORTED_SQ_DELETION);
1455 	CU_ASSERT_EQUAL(cpl->status.sct, SPDK_NVME_SCT_GENERIC);
1456 }
1457 
1458 static void
1459 ut_disconnect_qpair_poll_group_cb(struct spdk_nvme_qpair *qpair, void *ctx)
1460 {
1461 	int *disconnected = ctx;
1462 
1463 	(*disconnected)++;
1464 }
1465 
1466 static void
1467 test_nvme_tcp_ctrlr_disconnect_qpair(void)
1468 {
1469 	struct spdk_nvme_ctrlr ctrlr = {};
1470 	struct spdk_nvme_qpair *qpair;
1471 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {};
1472 	struct nvme_tcp_qpair tqpair = {
1473 		.qpair = {
1474 			.trtype = SPDK_NVME_TRANSPORT_TCP,
1475 			.ctrlr = &ctrlr,
1476 			.async = true,
1477 		},
1478 		.recv_pdu = &recv_pdu,
1479 	};
1480 	struct spdk_nvme_poll_group group = {};
1481 	struct nvme_tcp_poll_group tgroup = { .group.group = &group };
1482 	struct nvme_request req = { .qpair = &tqpair.qpair, .cb_fn = ut_disconnect_qpair_req_cb };
1483 	struct nvme_tcp_req treq = { .req = &req, .tqpair = &tqpair };
1484 	int rc, disconnected;
1485 
1486 	qpair = &tqpair.qpair;
1487 	qpair->poll_group = &tgroup.group;
1488 	tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
1489 	tqpair.needs_poll = true;
1490 	TAILQ_INIT(&tgroup.needs_poll);
1491 	STAILQ_INIT(&tgroup.group.disconnected_qpairs);
1492 	TAILQ_INIT(&tqpair.send_queue);
1493 	TAILQ_INIT(&tqpair.free_reqs);
1494 	TAILQ_INIT(&tqpair.outstanding_reqs);
1495 	TAILQ_INSERT_TAIL(&tgroup.needs_poll, &tqpair, link);
1496 	TAILQ_INSERT_TAIL(&tqpair.send_queue, &pdu, tailq);
1497 
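	/* Disconnecting should detach the qpair from the poll group's needs_poll list, close
	 * the socket and drop any PDUs still queued for sending. */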
1498 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1499 
1500 	CU_ASSERT(tqpair.needs_poll == false);
1501 	CU_ASSERT(tqpair.sock == NULL);
1502 	CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue) == true);
1503 
1504 	/* Check that outstanding requests are aborted */
1505 	treq.state = NVME_TCP_REQ_ACTIVE;
1506 	qpair->num_outstanding_reqs = 1;
1507 	qpair->state = NVME_QPAIR_DISCONNECTING;
1508 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1509 
1510 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1511 
1512 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
1513 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 0);
1514 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.free_reqs));
1515 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1516 
1517 	/* Check that a request with an accel operation in progress won't be aborted until that
1518 	 * operation is completed */
1519 	treq.state = NVME_TCP_REQ_ACTIVE;
1520 	treq.ordering.bits.in_progress_accel = 1;
1521 	qpair->poll_group = NULL;
1522 	qpair->num_outstanding_reqs = 1;
1523 	qpair->state = NVME_QPAIR_DISCONNECTING;
1524 	TAILQ_REMOVE(&tqpair.free_reqs, &treq, link);
1525 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1526 
1527 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1528 
1529 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1530 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1531 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1532 
1533 	/* Check that a qpair will be transitioned to a DISCONNECTED state only once the accel
1534 	 * operation is completed */
1535 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1536 	CU_ASSERT_EQUAL(rc, 0);
1537 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1538 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1539 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1540 
1541 	treq.ordering.bits.in_progress_accel = 0;
1542 	qpair->num_outstanding_reqs = 0;
1543 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1544 
1545 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1546 	CU_ASSERT_EQUAL(rc, -ENXIO);
1547 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1548 
1549 	/* Check the same scenario but this time with spdk_sock_flush() returning errors */
1550 	treq.state = NVME_TCP_REQ_ACTIVE;
1551 	treq.ordering.bits.in_progress_accel = 1;
1552 	qpair->num_outstanding_reqs = 1;
1553 	qpair->state = NVME_QPAIR_DISCONNECTING;
1554 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1555 
1556 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1557 
1558 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1559 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1560 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1561 
1562 	MOCK_SET(spdk_sock_flush, -ENOTCONN);
1563 	treq.ordering.bits.in_progress_accel = 0;
1564 	qpair->num_outstanding_reqs = 0;
1565 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1566 
1567 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1568 	CU_ASSERT_EQUAL(rc, 0);
1569 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1570 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1571 	CU_ASSERT_EQUAL(rc, -ENXIO);
1572 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1573 	MOCK_CLEAR(spdk_sock_flush);
1574 
1575 	/* Now check the same scenario, but with a qpair that's part of a poll group */
1576 	disconnected = 0;
1577 	group.ctx = &disconnected;
1578 	treq.state = NVME_TCP_REQ_ACTIVE;
1579 	treq.ordering.bits.in_progress_accel = 1;
1580 	qpair->poll_group = &tgroup.group;
1581 	qpair->num_outstanding_reqs = 1;
1582 	qpair->state = NVME_QPAIR_DISCONNECTING;
1583 	STAILQ_INSERT_TAIL(&tgroup.group.disconnected_qpairs, qpair, poll_group_stailq);
1584 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1585 
1586 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1587 						ut_disconnect_qpair_poll_group_cb);
1588 	/* As long as there's an outstanding request, disconnect_cb shouldn't be executed */
1589 	CU_ASSERT_EQUAL(disconnected, 0);
1590 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1591 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1592 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1593 
1594 	treq.ordering.bits.in_progress_accel = 0;
1595 	qpair->num_outstanding_reqs = 0;
1596 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1597 
1598 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1599 						ut_disconnect_qpair_poll_group_cb);
1600 	CU_ASSERT_EQUAL(disconnected, 1);
1601 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1602 
1603 	/* Check that a non-async qpair is marked as disconnected immediately */
1604 	qpair->poll_group = NULL;
1605 	qpair->state = NVME_QPAIR_DISCONNECTING;
1606 	qpair->async = false;
1607 
1608 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1609 
1610 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1611 }
1612 
1613 static void
1614 test_nvme_tcp_ctrlr_create_io_qpair(void)
1615 {
1616 	struct spdk_nvme_qpair *qpair = NULL;
1617 	struct nvme_tcp_ctrlr tctrlr = {};
1618 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1619 	uint16_t qid = 1;
1620 	struct spdk_nvme_io_qpair_opts opts = {
1621 		.io_queue_size = 2,
1622 		.qprio = SPDK_NVME_QPRIO_URGENT,
1623 		.io_queue_requests = 1,
1624 	};
1625 	struct nvme_tcp_qpair *tqpair;
1626 
1627 	ctrlr->trid.priority = 1;
1628 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1629 	memset(ctrlr->opts.psk, 0, sizeof(ctrlr->opts.psk));
1630 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1631 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1632 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1633 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1634 
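	/* As the assertions below show, the qpair is expected to be created with
	 * num_entries == io_queue_size - 1. */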
1635 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1636 	tqpair = nvme_tcp_qpair(qpair);
1637 
1638 	CU_ASSERT(qpair != NULL);
1639 	CU_ASSERT(qpair->id == 1);
1640 	CU_ASSERT(qpair->ctrlr == ctrlr);
1641 	CU_ASSERT(qpair->qprio == SPDK_NVME_QPRIO_URGENT);
1642 	CU_ASSERT(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
1643 	CU_ASSERT(qpair->poll_group == (void *)0xDEADBEEF);
1644 	CU_ASSERT(tqpair->num_entries == 1);
1645 
1646 	free(tqpair->tcp_reqs);
1647 	spdk_free(tqpair->send_pdus);
1648 	free(tqpair);
1649 
1650 	/* Max queue size shall pass */
1651 	opts.io_queue_size = 0xffff;
1652 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1653 	tqpair = nvme_tcp_qpair(qpair);
1654 
1655 	CU_ASSERT(qpair != NULL);
1656 	CU_ASSERT(tqpair->num_entries == 0xfffe);
1657 
1658 	free(tqpair->tcp_reqs);
1659 	spdk_free(tqpair->send_pdus);
1660 	free(tqpair);
1661 
1662 	/* Queue size 0 shall fail */
1663 	opts.io_queue_size = 0;
1664 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1665 	CU_ASSERT(qpair == NULL);
1666 
1667 	/* Queue size 1 shall fail */
1668 	opts.io_queue_size = 1;
1669 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1670 	CU_ASSERT(qpair == NULL);
1671 }
1672 
1673 static void
1674 test_nvme_tcp_ctrlr_delete_io_qpair(void)
1675 {
1676 	struct spdk_nvme_ctrlr	ctrlr = {};
1677 	struct spdk_nvme_qpair *qpair;
1678 	struct nvme_tcp_qpair *tqpair;
1679 	struct nvme_tcp_req tcp_req = {};
1680 	struct nvme_request	req = {};
1681 	int rc;
1682 
1683 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
1684 	tqpair->tcp_reqs = calloc(1, sizeof(struct nvme_tcp_req));
1685 	tqpair->send_pdus = calloc(1, sizeof(struct nvme_tcp_pdu));
1686 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1687 	qpair = &tqpair->qpair;
1688 	qpair->ctrlr = &ctrlr;
1689 	tcp_req.req = &req;
1690 	tcp_req.req->qpair = &tqpair->qpair;
1691 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1692 	tcp_req.tqpair = tqpair;
1693 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1694 	TAILQ_INIT(&tqpair->outstanding_reqs);
1695 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1696 	qpair->num_outstanding_reqs = 1;
1697 
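	/* A request is left outstanding so that deleting the qpair also covers cleaning up an
	 * in-flight request. */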
1698 	rc = nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1699 
1700 	CU_ASSERT(rc == 0);
1701 }
1702 
1703 static void
1704 test_nvme_tcp_poll_group_get_stats(void)
1705 {
1706 	int rc = 0;
1707 	struct spdk_sock_group sgroup = {};
1708 	struct nvme_tcp_poll_group *pgroup = NULL;
1709 	struct spdk_nvme_transport_poll_group *tgroup = NULL;
1710 	struct spdk_nvme_transport_poll_group_stat *tgroup_stat = NULL;
1711 
1712 	MOCK_SET(spdk_sock_group_create, &sgroup);
1713 	tgroup = nvme_tcp_poll_group_create();
1714 	CU_ASSERT(tgroup != NULL);
1715 	pgroup = nvme_tcp_poll_group(tgroup);
1716 	CU_ASSERT(pgroup != NULL);
1717 
1718 	/* Invalid group pointer, expect failure with -EINVAL */
1719 	rc = nvme_tcp_poll_group_get_stats(NULL, &tgroup_stat);
1720 	CU_ASSERT(rc == -EINVAL);
1721 	CU_ASSERT(tgroup_stat == NULL);
1722 
1723 	/* NULL stats pointer, expect failure with -EINVAL */
1724 	rc = nvme_tcp_poll_group_get_stats(tgroup, NULL);
1725 	CU_ASSERT(rc == -EINVAL);
1726 
1727 	/* Get stats success */
1728 	rc = nvme_tcp_poll_group_get_stats(tgroup, &tgroup_stat);
1729 	CU_ASSERT(rc == 0);
1730 	CU_ASSERT(tgroup_stat != NULL);
1731 	CU_ASSERT(tgroup_stat->trtype == SPDK_NVME_TRANSPORT_TCP);
1732 	CU_ASSERT(memcmp(&tgroup_stat->tcp, &pgroup->stats, sizeof(struct spdk_nvme_tcp_stat)) == 0);
1733 
1734 	nvme_tcp_poll_group_free_stats(tgroup, tgroup_stat);
1735 	rc = nvme_tcp_poll_group_destroy(tgroup);
1736 	CU_ASSERT(rc == 0);
1737 
1738 	MOCK_CLEAR(spdk_sock_group_create);
1739 }
1740 
1741 static void
1742 test_nvme_tcp_ctrlr_construct(void)
1743 {
1744 	struct nvme_tcp_qpair *tqpair = NULL;
1745 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1746 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1747 	struct spdk_nvme_transport_id trid = {
1748 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1749 		.priority = 1,
1750 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1751 		.traddr = "192.168.1.78",
1752 		.trsvcid = "23",
1753 	};
1754 	struct spdk_nvme_ctrlr_opts opts = {
1755 		.admin_queue_size = 2,
1756 		.src_addr = "192.168.1.77",
1757 		.src_svcid = "23",
1758 	};
1759 
1760 	/* Transmit ACK timeout exceeds the max, expected to pass with the value capped at the max */
1761 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1762 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDEADBEEF);
1763 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1764 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1765 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1766 
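	/* The admin qpair should be fully set up: num_entries is admin_queue_size - 1 and, as
	 * the assertions below imply, the send_pdus array is laid out roughly as
	 *   send_pdus[0..num_entries - 1] -> per-request PDUs
	 *   send_pdus[num_entries]        -> tqpair->send_pdu
	 *   send_pdus[num_entries + 1]    -> tqpair->recv_pdu
	 */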
1767 	CU_ASSERT(ctrlr != NULL);
1768 	CU_ASSERT(tctrlr != NULL);
1769 	CU_ASSERT(tqpair != NULL);
1770 	CU_ASSERT(ctrlr->opts.transport_ack_timeout == NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
1771 	CU_ASSERT(memcmp(&ctrlr->trid, &trid, sizeof(struct spdk_nvme_transport_id)) == 0);
1772 	CU_ASSERT(tqpair->num_entries == 1);
1773 	CU_ASSERT(TAILQ_EMPTY(&tqpair->send_queue));
1774 	CU_ASSERT(TAILQ_EMPTY(&tqpair->outstanding_reqs));
1775 	CU_ASSERT(!TAILQ_EMPTY(&tqpair->free_reqs));
1776 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs) == &tqpair->tcp_reqs[0]);
1777 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->cid == 0);
1778 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->tqpair == tqpair);
1779 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->pdu == &tqpair->send_pdus[0]);
1780 	CU_ASSERT(tqpair->send_pdu == &tqpair->send_pdus[1]);
1781 	CU_ASSERT(tqpair->recv_pdu == &tqpair->send_pdus[2]);
1782 
1783 	free(tqpair->tcp_reqs);
1784 	spdk_free(tqpair->send_pdus);
1785 	free(tqpair);
1786 	free(tctrlr);
1787 
1788 	/* Admin queue size is less than the minimum required, so creating the admin qpair is expected to fail */
1789 	opts.admin_queue_size = 1;
1790 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1791 	CU_ASSERT(ctrlr == NULL);
1792 
1793 	/* Unhandled ADRFAM, so creating the admin qpair is expected to fail */
1794 	opts.admin_queue_size = 2;
1795 	trid.adrfam = SPDK_NVMF_ADRFAM_INTRA_HOST;
1796 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1797 	CU_ASSERT(ctrlr == NULL);
1798 
1799 	/* Socket connection error, so creating the admin qpair is expected to fail */
1800 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1801 	MOCK_SET(spdk_sock_connect_ext, NULL);
1802 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1803 	CU_ASSERT(ctrlr == NULL);
1804 
1805 	MOCK_CLEAR(spdk_sock_connect_ext);
1806 }
1807 
1808 static void
1809 test_nvme_tcp_qpair_submit_request(void)
1810 {
1811 	int rc = 0;
1812 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1813 	struct nvme_tcp_qpair *tqpair = NULL;
1814 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1815 	struct nvme_tcp_req *tcp_req = NULL;
1816 	struct nvme_request req = {};
1817 	struct nvme_tcp_ut_bdev_io bio = {};
1818 	struct spdk_nvme_tcp_stat stat = {};
1819 	struct spdk_nvme_transport_id trid = {
1820 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1821 		.priority = 1,
1822 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1823 		.traddr = "192.168.1.78",
1824 		.trsvcid = "23",
1825 	};
1826 	struct spdk_nvme_ctrlr_opts opts = {
1827 		.admin_queue_size = 2,
1828 		.src_addr = "192.168.1.77",
1829 		.src_svcid = "23",
1830 	};
1831 
1832 	/* Construct TCP Controller */
1833 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1834 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDCADBEEF);
1835 
1836 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1837 	CU_ASSERT(ctrlr != NULL);
1838 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1839 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1840 	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
1841 	CU_ASSERT(tctrlr != NULL);
1842 	CU_ASSERT(tqpair != NULL);
1843 	CU_ASSERT(tcp_req->pdu != NULL);
1844 	CU_ASSERT(tqpair->num_entries == 1);
1845 
1846 	tqpair->stats = &stat;
1847 	req.qpair = &tqpair->qpair;
1848 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
1849 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
1850 
1851 	/* Request construction fails because max_sges is too small for the two-iovec payload */
1852 	req.qpair->ctrlr->max_sges = 1;
1853 	req.payload_size = 2048;
1854 	req.payload_offset = 0;
1855 	bio.iovpos = 0;
1856 	bio.iovs[0].iov_len = 1024;
1857 	bio.iovs[1].iov_len = 1024;
1858 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
1859 	bio.iovs[1].iov_base = (void *)0xDFADBEEF;
1860 
1861 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1862 	CU_ASSERT(rc == -1);
1863 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->free_reqs));
1864 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_FREE);
1865 
1866 	/* Multiple SGL entries, expected to pass */
1867 	req.qpair->ctrlr->max_sges = 2;
1868 
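	/* With two SGL entries allowed, the 2048-byte host-to-controller payload is expected to
	 * be sent in-capsule; as the assertions below mirror, the PDU length should roughly be
	 *   plen = sizeof(struct spdk_nvme_tcp_cmd) + req.payload_size
	 */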
1869 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1870 	CU_ASSERT(rc == 0);
1871 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_ACTIVE);
1872 	CU_ASSERT(NULL == TAILQ_FIRST(&tqpair->free_reqs));
1873 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->outstanding_reqs));
1874 	CU_ASSERT(tcp_req->expected_datao == 0);
1875 	CU_ASSERT(tcp_req->req == &req);
1876 	CU_ASSERT(tcp_req->r2tl_remain == 0);
1877 	CU_ASSERT(tcp_req->r2tl_remain_next == 0);
1878 	CU_ASSERT(tcp_req->active_r2ts == 0);
1879 	CU_ASSERT(tcp_req->iovcnt == 2);
1880 	CU_ASSERT(tcp_req->ordering.raw == 0);
1881 	CU_ASSERT(req.cmd.cid == tcp_req->cid);
1882 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
1883 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
1884 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
1885 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
1886 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
1887 	CU_ASSERT(tcp_req->in_capsule_data == true);
1888 	CU_ASSERT(tcp_req->iov[0].iov_len == bio.iovs[0].iov_len);
1889 	CU_ASSERT(tcp_req->iov[1].iov_len == bio.iovs[1].iov_len);
1890 	CU_ASSERT(tcp_req->iov[0].iov_base == bio.iovs[0].iov_base);
1891 	CU_ASSERT(tcp_req->iov[1].iov_base == bio.iovs[1].iov_base);
1892 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
1893 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) == 0);
1894 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_DDGSTF) == 0);
1895 	CU_ASSERT(tcp_req->datao == 0);
1896 	CU_ASSERT(tcp_req->pdu->data_len == req.payload_size);
1897 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdo == sizeof(struct spdk_nvme_tcp_cmd));
1898 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.plen == sizeof(struct spdk_nvme_tcp_cmd) +
1899 		  req.payload_size);
1900 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_base == (void *)0xDEADBEEF);
1901 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_len == 1024);
1902 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_base == (void *)0xDFADBEEF);
1903 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_len == 1024);
1904 	CU_ASSERT(tcp_req->pdu->data_iovcnt == 2);
1905 
1906 	/* Request resource limit reached, expected to return -EAGAIN */
1907 	memset(&req, 0x00, sizeof(struct nvme_request));
1908 	CU_ASSERT(tqpair->stats->queued_requests == 0);
1909 
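	/* The only tcp_req was consumed by the previous submission and is still outstanding,
	 * so this request is queued and -EAGAIN is returned. */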
1910 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1911 	CU_ASSERT(rc == -EAGAIN);
1912 	CU_ASSERT(tqpair->stats->queued_requests == 1);
1913 
1914 	MOCK_CLEAR(spdk_sock_connect_ext);
1915 	free(tqpair->tcp_reqs);
1916 	spdk_free(tqpair->send_pdus);
1917 	free(tqpair);
1918 	free(tctrlr);
1919 }
1920 
1921 int
1922 main(int argc, char **argv)
1923 {
1924 	CU_pSuite	suite = NULL;
1925 	unsigned int	num_failures;
1926 
1927 	CU_initialize_registry();
1928 
1929 	suite = CU_add_suite("nvme_tcp", NULL, NULL);
1930 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
1931 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
1932 	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
1933 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
1934 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
1935 	CU_ADD_TEST(suite, test_nvme_tcp_req_complete_safe);
1936 	CU_ADD_TEST(suite, test_nvme_tcp_req_get);
1937 	CU_ADD_TEST(suite, test_nvme_tcp_req_init);
1938 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_capsule_cmd_send);
1939 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_write_pdu);
1940 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_set_recv_state);
1941 	CU_ADD_TEST(suite, test_nvme_tcp_alloc_reqs);
1942 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_send_h2c_term_req);
1943 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_ch_handle);
1944 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_connect_sock);
1945 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_icreq_send);
1946 	CU_ADD_TEST(suite, test_nvme_tcp_c2h_payload_handle);
1947 	CU_ADD_TEST(suite, test_nvme_tcp_icresp_handle);
1948 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_payload_handle);
1949 	CU_ADD_TEST(suite, test_nvme_tcp_capsule_resp_hdr_handle);
1950 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_connect_qpair);
1951 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_disconnect_qpair);
1952 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_create_io_qpair);
1953 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_delete_io_qpair);
1954 	CU_ADD_TEST(suite, test_nvme_tcp_poll_group_get_stats);
1955 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_construct);
1956 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_submit_request);
1957 
1958 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1959 	CU_cleanup_registry();
1960 	return num_failures;
1961 }
1962