xref: /spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c (revision 83ba9086796471697a4975a58f60e2392bccd08c)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2019 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 #include "spdk/nvme.h"
9 
10 #include "spdk_internal/cunit.h"
11 
12 #include "common/lib/test_sock.c"
13 #include "nvme/nvme_internal.h"
14 #include "common/lib/nvme/common_stubs.h"
15 
16 /* nvme_transport_ctrlr_disconnect_qpair_done() stub is defined in common_stubs.h, but we need to
17  * override it here */
18 static void nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair);
19 #define nvme_transport_ctrlr_disconnect_qpair_done nvme_transport_ctrlr_disconnect_qpair_done_mocked
20 
21 #include "nvme/nvme_tcp.c"
22 
23 SPDK_LOG_REGISTER_COMPONENT(nvme)
24 
25 DEFINE_STUB(nvme_qpair_submit_request,
26 	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
27 
28 DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
29 		struct spdk_nvme_qpair *qpair), 0);
30 DEFINE_STUB(spdk_sock_get_optimal_sock_group,
31 	    int,
32 	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
33 	    0);
34 
35 DEFINE_STUB(spdk_sock_group_get_ctx,
36 	    void *,
37 	    (struct spdk_sock_group *group),
38 	    NULL);
39 DEFINE_STUB(spdk_sock_get_numa_id, int32_t, (struct spdk_sock *sock), SPDK_ENV_NUMA_ID_ANY);
40 
41 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
42 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
43 
44 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
45 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
46 
47 DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
48 		struct spdk_nvme_cmd *cmd));
49 
50 DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
51 		struct spdk_nvme_cpl *cpl));
52 DEFINE_STUB(spdk_key_get_key, int, (struct spdk_key *key, void *buf, int len), 0);
53 DEFINE_STUB(spdk_key_get_name, const char *, (struct spdk_key *key), NULL);
54 
55 DEFINE_STUB(spdk_memory_domain_get_system_domain, struct spdk_memory_domain *, (void), NULL);
56 DEFINE_STUB(spdk_memory_domain_translate_data, int,
57 	    (struct spdk_memory_domain *src_domain, void *src_domain_ctx,
58 	     struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
59 	     void *addr, size_t len, struct spdk_memory_domain_translation_result *result), 0);
60 DEFINE_STUB_V(spdk_memory_domain_invalidate_data, (struct spdk_memory_domain *domain,
61 		void *domain_ctx, struct iovec *iov, uint32_t iovcnt));
62 
63 static void
64 nvme_transport_ctrlr_disconnect_qpair_done_mocked(struct spdk_nvme_qpair *qpair)
65 {
66 	qpair->state = NVME_QPAIR_DISCONNECTED;
67 }
68 
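/* nvme_tcp_pdu_set_data_buf() carves the slice [data_offset, data_offset + data_len) out of the
 * request iovec array and stores it in pdu->data_iov/data_iovcnt, splitting entries at the slice
 * boundaries as needed. The cases below cover a single entry, splits across entries, and a
 * full-size mapping of NVME_TCP_MAX_SGL_DESCRIPTORS entries. */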
69 static void
70 test_nvme_tcp_pdu_set_data_buf(void)
71 {
72 	struct nvme_tcp_pdu pdu = {};
73 	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
74 	uint32_t data_len;
75 	uint64_t i;
76 
77 	/* 1st case: input is a single SGL entry. */
78 	iov[0].iov_base = (void *)0xDEADBEEF;
79 	iov[0].iov_len = 4096;
80 
81 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
82 
83 	CU_ASSERT(pdu.data_iovcnt == 1);
84 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
85 	CU_ASSERT(pdu.data_iov[0].iov_len == 512);
86 
87 	/* 2nd case: simulate split on multiple SGL entries. */
88 	iov[0].iov_base = (void *)0xDEADBEEF;
89 	iov[0].iov_len = 4096;
90 	iov[1].iov_base = (void *)0xFEEDBEEF;
91 	iov[1].iov_len = 512 * 7;
92 	iov[2].iov_base = (void *)0xF00DF00D;
93 	iov[2].iov_len = 4096 * 2;
94 
95 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
96 
97 	CU_ASSERT(pdu.data_iovcnt == 1);
98 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
99 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
100 
101 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
102 
103 	CU_ASSERT(pdu.data_iovcnt == 2);
104 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
105 	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
106 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
107 	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
108 
109 	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
110 
111 	CU_ASSERT(pdu.data_iovcnt == 2);
112 	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
113 	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
114 	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
115 	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
116 
117 	/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
118 	 * entries.
119 	 */
120 	data_len = 0;
121 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
122 		iov[i].iov_base = (void *)(0xDEADBEEF + i);
123 		iov[i].iov_len = 512 * (i + 1);
124 		data_len += 512 * (i + 1);
125 	}
126 
127 	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
128 
129 	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
130 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
131 		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
132 		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
133 	}
134 }
135 
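/* nvme_tcp_build_iovs() assembles the on-wire iovec for a PDU: the common header (plus header
 * digest when requested), the data iovecs, and the trailing data digest. mapped_length reports
 * the total number of bytes covered by the returned iovecs. */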
136 static void
137 test_nvme_tcp_build_iovs(void)
138 {
139 	const uintptr_t pdu_iov_len = 4096;
140 	struct nvme_tcp_pdu pdu = {};
141 	struct iovec iovs[5] = {};
142 	uint32_t mapped_length = 0;
143 	int rc;
144 
145 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
146 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
147 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
148 			      SPDK_NVME_TCP_DIGEST_LEN;
149 	pdu.data_len = pdu_iov_len * 2;
150 	pdu.padding_len = 0;
151 
152 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
153 	pdu.data_iov[0].iov_len = pdu_iov_len;
154 	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
155 	pdu.data_iov[1].iov_len = pdu_iov_len;
156 	pdu.data_iovcnt = 2;
157 
158 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
159 	CU_ASSERT(rc == 4);
160 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
161 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
162 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
163 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
164 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
165 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
166 	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
167 	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
168 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
169 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
170 
171 	/* Add a new data_iov entry, update pdu iov count and data length */
172 	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
173 	pdu.data_iov[2].iov_len = 123;
174 	pdu.data_iovcnt = 3;
175 	pdu.data_len += 123;
176 	pdu.hdr.common.plen += 123;
177 
178 	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
179 	CU_ASSERT(rc == 5);
180 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
181 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
182 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
183 	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
184 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
185 	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
186 	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
187 	CU_ASSERT(iovs[3].iov_len == 123);
188 	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
189 	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
190 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
191 		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
192 }
193 
194 struct nvme_tcp_ut_bdev_io {
195 	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
196 	int iovpos;
197 };
198 
199 /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
200 static void
201 nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
202 {
203 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
204 	struct iovec *iov;
205 
206 	for (bio->iovpos = 0; bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
207 		iov = &bio->iovs[bio->iovpos];
208 		/* Offset must be aligned with the start of an SGL entry */
209 		if (offset == 0) {
210 			break;
211 		}
212 
213 		SPDK_CU_ASSERT_FATAL(offset >= iov->iov_len);
214 		offset -= iov->iov_len;
215 	}
216 
217 	SPDK_CU_ASSERT_FATAL(offset == 0);
218 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
219 }
220 
221 static int
222 nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
223 {
224 	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
225 	struct iovec *iov;
226 
227 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
228 
229 	iov = &bio->iovs[bio->iovpos];
230 
231 	*address = iov->iov_base;
232 	*length = iov->iov_len;
233 	bio->iovpos++;
234 
235 	return 0;
236 }
237 
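/* nvme_tcp_build_sgl_request() walks the payload's reset_sgl/next_sge callbacks to fill
 * tcp_req->iov and fails when the payload needs more than the controller's max_sges entries. */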
238 static void
239 test_nvme_tcp_build_sgl_request(void)
240 {
241 	struct nvme_tcp_qpair tqpair = {};
242 	struct spdk_nvme_ctrlr ctrlr = {{0}};
243 	struct nvme_tcp_req tcp_req = {0};
244 	struct nvme_request req = {{0}};
245 	struct nvme_tcp_ut_bdev_io bio;
246 	uint64_t i;
247 	int rc;
248 
249 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
250 	tqpair.qpair.ctrlr = &ctrlr;
251 	tcp_req.req = &req;
252 
253 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
254 	req.qpair = &tqpair.qpair;
255 
256 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
257 		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
258 		bio.iovs[i].iov_len = 0;
259 	}
260 
261 	/* Test case 1: Single SGL. Expected: PASS */
262 	bio.iovpos = 0;
263 	req.payload_offset = 0;
264 	req.payload_size = 0x1000;
265 	bio.iovs[0].iov_len = 0x1000;
266 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
267 	SPDK_CU_ASSERT_FATAL(rc == 0);
268 	CU_ASSERT(bio.iovpos == 1);
269 	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
270 	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
271 	CU_ASSERT(tcp_req.iovcnt == 1);
272 
273 	/* Test case 2: Multiple SGL. Expected: PASS */
274 	bio.iovpos = 0;
275 	req.payload_offset = 0;
276 	req.payload_size = 0x4000;
277 	for (i = 0; i < 4; i++) {
278 		bio.iovs[i].iov_len = 0x1000;
279 	}
280 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
281 	SPDK_CU_ASSERT_FATAL(rc == 0);
282 	CU_ASSERT(bio.iovpos == 4);
283 	CU_ASSERT(tcp_req.iovcnt == 4);
284 	for (i = 0; i < 4; i++) {
285 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
286 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
287 	}
288 
289 	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
290 	bio.iovpos = 0;
291 	req.payload_offset = 0;
292 	req.payload_size = 0x17000;
293 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
294 		bio.iovs[i].iov_len = 0x1000;
295 	}
296 	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
297 	SPDK_CU_ASSERT_FATAL(rc != 0);
298 	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
299 	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
300 		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
301 		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
302 	}
303 }
304 
305 static void
306 test_nvme_tcp_pdu_set_data_buf_with_md(void)
307 {
308 	struct nvme_tcp_pdu pdu = {};
309 	struct iovec iovs[7] = {};
310 	struct spdk_dif_ctx dif_ctx = {};
311 	int rc;
312 	struct spdk_dif_ctx_init_ext_opts dif_opts;
313 
314 	pdu.dif_ctx = &dif_ctx;
315 
316 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
317 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
318 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
319 			       0, 0, 0, 0, 0, &dif_opts);
320 	CU_ASSERT(rc == 0);
321 
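	/* The DIF context describes 520-byte extended blocks (512 bytes of data + 8 bytes of
	 * metadata), so a data offset/length is translated into a buffer offset/length that also
	 * covers the metadata of every block boundary it crosses (e.g. 1000 data bytes starting
	 * at data offset 500 map to 1016 buffer bytes starting at buffer offset 500). */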
322 	/* Single iovec case */
323 	iovs[0].iov_base = (void *)0xDEADBEEF;
324 	iovs[0].iov_len = 2080;
325 
326 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
327 
328 	CU_ASSERT(dif_ctx.data_offset == 0);
329 	CU_ASSERT(pdu.data_len == 500);
330 	CU_ASSERT(pdu.data_iovcnt == 1);
331 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
332 	CU_ASSERT(pdu.data_iov[0].iov_len == 500);
333 
334 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
335 
336 	CU_ASSERT(dif_ctx.data_offset == 500);
337 	CU_ASSERT(pdu.data_len == 1000);
338 	CU_ASSERT(pdu.data_iovcnt == 1);
339 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
340 	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
341 
342 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
343 
344 	CU_ASSERT(dif_ctx.data_offset == 1500);
345 	CU_ASSERT(pdu.data_len == 548);
346 	CU_ASSERT(pdu.data_iovcnt == 1);
347 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
348 	CU_ASSERT(pdu.data_iov[0].iov_len == 564);
349 
350 	/* Multiple iovecs case */
351 	iovs[0].iov_base = (void *)0xDEADBEEF;
352 	iovs[0].iov_len = 256;
353 	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
354 	iovs[1].iov_len = 256 + 1;
355 	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
356 	iovs[2].iov_len = 4;
357 	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
358 	iovs[3].iov_len = 3 + 123;
359 	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
360 	iovs[4].iov_len = 389 + 6;
361 	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
362 	iovs[5].iov_len = 2 + 512 + 8 + 432;
363 	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
364 	iovs[6].iov_len = 80 + 8;
365 
366 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
367 
368 	CU_ASSERT(dif_ctx.data_offset == 0);
369 	CU_ASSERT(pdu.data_len == 500);
370 	CU_ASSERT(pdu.data_iovcnt == 2);
371 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
372 	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
373 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
374 	CU_ASSERT(pdu.data_iov[1].iov_len == 244);
375 
376 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
377 
378 	CU_ASSERT(dif_ctx.data_offset == 500);
379 	CU_ASSERT(pdu.data_len == 1000);
380 	CU_ASSERT(pdu.data_iovcnt == 5);
381 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
382 	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
383 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
384 	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
385 	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
386 	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
387 	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
388 	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
389 	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
390 	CU_ASSERT(pdu.data_iov[4].iov_len == 478);
391 
392 	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
393 
394 	CU_ASSERT(dif_ctx.data_offset == 1500);
395 	CU_ASSERT(pdu.data_len == 548);
396 	CU_ASSERT(pdu.data_iovcnt == 2);
397 	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
398 	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
399 	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
400 	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
401 }
402 
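/* Same as test_nvme_tcp_build_iovs(), but with a DIF context: the single (512 + 8) * 8 byte
 * buffer is emitted as eight 512-byte data iovecs that skip the interleaved metadata. */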
403 static void
404 test_nvme_tcp_build_iovs_with_md(void)
405 {
406 	struct nvme_tcp_pdu pdu = {};
407 	struct iovec iovs[11] = {};
408 	struct spdk_dif_ctx dif_ctx = {};
409 	uint32_t mapped_length = 0;
410 	int rc;
411 	struct spdk_dif_ctx_init_ext_opts dif_opts;
412 
413 	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
414 	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
415 	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
416 			       0, 0, 0, 0, 0, &dif_opts);
417 	CU_ASSERT(rc == 0);
418 
419 	pdu.dif_ctx = &dif_ctx;
420 
421 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
422 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
423 	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
424 			      SPDK_NVME_TCP_DIGEST_LEN;
425 	pdu.data_len = 512 * 8;
426 	pdu.padding_len = 0;
427 
428 	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
429 	pdu.data_iov[0].iov_len = (512 + 8) * 8;
430 	pdu.data_iovcnt = 1;
431 
432 	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
433 	CU_ASSERT(rc == 10);
434 	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
435 	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
436 	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
437 	CU_ASSERT(iovs[1].iov_len == 512);
438 	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
439 	CU_ASSERT(iovs[2].iov_len == 512);
440 	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
441 	CU_ASSERT(iovs[3].iov_len == 512);
442 	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
443 	CU_ASSERT(iovs[4].iov_len == 512);
444 	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
445 	CU_ASSERT(iovs[5].iov_len == 512);
446 	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
447 	CU_ASSERT(iovs[6].iov_len == 512);
448 	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
449 	CU_ASSERT(iovs[7].iov_len == 512);
450 	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
451 	CU_ASSERT(iovs[8].iov_len == 512);
452 	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
453 	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
454 	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
455 		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
456 }
457 
458 /* Just define, nothing to do */
459 static void
460 ut_nvme_complete_request(void *arg, const struct spdk_nvme_cpl *cpl)
461 {
462 	return;
463 }
464 
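/* nvme_tcp_req_complete_safe() completes a request only once both the send ack and the response
 * data (data_recv) have been seen and the request is still ACTIVE. When the qpair is not in a
 * completion context, tqpair->async_complete is additionally set. */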
465 static void
466 test_nvme_tcp_req_complete_safe(void)
467 {
468 	bool rc;
469 	struct nvme_tcp_req	tcp_req = {0};
470 	struct nvme_request	req = {{0}};
471 	struct nvme_tcp_qpair	tqpair = {{0}};
472 
473 	tcp_req.req = &req;
474 	tcp_req.req->qpair = &tqpair.qpair;
475 	tcp_req.req->cb_fn = ut_nvme_complete_request;
476 	tcp_req.tqpair = &tqpair;
477 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
478 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
479 	tqpair.qpair.num_outstanding_reqs = 1;
480 
481 	/* Test case 1: send operation and transfer completed. Expect: PASS */
482 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
483 	tcp_req.ordering.bits.send_ack = 1;
484 	tcp_req.ordering.bits.data_recv = 1;
485 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
486 
487 	rc = nvme_tcp_req_complete_safe(&tcp_req);
488 	CU_ASSERT(rc == true);
489 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
490 
491 	/* Test case 2: send operation not completed. Expect: FAIL */
492 	tcp_req.ordering.raw = 0;
493 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
494 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
495 	tqpair.qpair.num_outstanding_reqs = 1;
496 
497 	rc = nvme_tcp_req_complete_safe(&tcp_req);
498 	SPDK_CU_ASSERT_FATAL(rc != true);
499 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
500 	TAILQ_REMOVE(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
501 
502 	/* Test case 3: in completion context. Expect: PASS */
503 	tqpair.qpair.in_completion_context = 1;
504 	tqpair.async_complete = 0;
505 	tcp_req.ordering.bits.send_ack = 1;
506 	tcp_req.ordering.bits.data_recv = 1;
507 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
508 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
509 	tqpair.qpair.num_outstanding_reqs = 1;
510 
511 	rc = nvme_tcp_req_complete_safe(&tcp_req);
512 	CU_ASSERT(rc == true);
513 	CU_ASSERT(tcp_req.tqpair->async_complete == 0);
514 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
515 
516 	/* Test case 4: in async complete. Expect: PASS */
517 	tqpair.qpair.in_completion_context = 0;
518 	tcp_req.ordering.bits.send_ack = 1;
519 	tcp_req.ordering.bits.data_recv = 1;
520 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
521 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
522 	tqpair.qpair.num_outstanding_reqs = 1;
523 
524 	rc = nvme_tcp_req_complete_safe(&tcp_req);
525 	CU_ASSERT(rc == true);
526 	CU_ASSERT(tcp_req.tqpair->async_complete);
527 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
528 }
529 
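/* nvme_tcp_req_init() attaches the nvme_request to the tcp_req, decides whether the payload is
 * sent as in-capsule data, and programs SGL1 as an offset-addressed data block descriptor that
 * spans the whole payload, for both SGL and contiguous payload types. */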
530 static void
531 test_nvme_tcp_req_init(void)
532 {
533 	struct nvme_tcp_qpair tqpair = {};
534 	struct nvme_request req = {};
535 	struct nvme_tcp_req tcp_req = {0};
536 	struct spdk_nvme_ctrlr ctrlr = {{0}};
537 	struct nvme_tcp_ut_bdev_io bio = {};
538 	int rc;
539 
540 	tqpair.qpair.ctrlr = &ctrlr;
541 	req.qpair = &tqpair.qpair;
542 
543 	tcp_req.cid = 1;
544 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
545 	req.payload_offset = 0;
546 	req.payload_size = 4096;
547 	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
548 	ctrlr.ioccsz_bytes = 1024;
549 	bio.iovpos = 0;
550 	bio.iovs[0].iov_len = 8192;
551 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
552 
553 	/* Test case 1: payload type SGL. Expect: PASS */
554 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
555 	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
556 
557 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
558 	CU_ASSERT(rc == 0);
559 	CU_ASSERT(tcp_req.req == &req);
560 	CU_ASSERT(tcp_req.in_capsule_data == true);
561 	CU_ASSERT(tcp_req.iovcnt == 1);
562 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
563 	CU_ASSERT(tcp_req.iov[0].iov_base == bio.iovs[0].iov_base);
564 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
565 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
566 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
567 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
568 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
569 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
570 
571 	/* Test case 2: payload type CONTIG. Expect: PASS */
572 	memset(&req.cmd, 0, sizeof(req.cmd));
573 	memset(&tcp_req, 0, sizeof(tcp_req));
574 	tcp_req.cid = 1;
575 	req.payload = NVME_PAYLOAD_CONTIG(&bio, NULL);
576 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
577 
578 	rc = nvme_tcp_req_init(&tqpair, &req, &tcp_req);
579 	CU_ASSERT(rc == 0);
580 	CU_ASSERT(tcp_req.req == &req);
581 	CU_ASSERT(tcp_req.in_capsule_data == true);
582 	CU_ASSERT(tcp_req.iov[0].iov_len == req.payload_size);
583 	CU_ASSERT(tcp_req.iov[0].iov_base == &bio);
584 	CU_ASSERT(tcp_req.iovcnt == 1);
585 	CU_ASSERT(req.cmd.cid == tcp_req.cid);
586 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
587 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
588 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
589 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
590 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
591 
592 }
593 
594 static void
595 test_nvme_tcp_req_get(void)
596 {
597 	struct nvme_tcp_req tcp_req = {0};
598 	struct nvme_tcp_qpair tqpair = {};
599 	struct nvme_tcp_pdu send_pdu = {};
600 
601 	tcp_req.pdu = &send_pdu;
602 	tcp_req.state = NVME_TCP_REQ_FREE;
603 
604 	TAILQ_INIT(&tqpair.free_reqs);
605 	TAILQ_INIT(&tqpair.outstanding_reqs);
606 	TAILQ_INSERT_HEAD(&tqpair.free_reqs, &tcp_req, link);
607 
608 	CU_ASSERT(nvme_tcp_req_get(&tqpair) == &tcp_req);
609 	CU_ASSERT(tcp_req.state == NVME_TCP_REQ_ACTIVE);
610 	CU_ASSERT(tcp_req.datao == 0);
611 	CU_ASSERT(tcp_req.req == NULL);
612 	CU_ASSERT(tcp_req.in_capsule_data == false);
613 	CU_ASSERT(tcp_req.r2tl_remain == 0);
614 	CU_ASSERT(tcp_req.iovcnt == 0);
615 	CU_ASSERT(tcp_req.ordering.raw == 0);
616 	/* outstanding_reqs should still be empty - caller is responsible
617 	 * for putting it on the TAILQ after any other initialization is
618 	 * completed.
619 	 */
620 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
621 	CU_ASSERT(TAILQ_EMPTY(&tqpair.free_reqs));
622 
623 	/* No tcp request available, expect fail */
624 	SPDK_CU_ASSERT_FATAL(nvme_tcp_req_get(&tqpair) == NULL);
625 }
626 
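/* nvme_tcp_qpair_capsule_cmd_send() fills in the capsule command header: pdo marks where the
 * in-capsule data starts (header plus header digest, or the CPDA-derived padded offset when a
 * larger cpda is negotiated), and plen additionally covers the payload and, when enabled, the
 * data digest. */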
627 static void
628 test_nvme_tcp_qpair_capsule_cmd_send(void)
629 {
630 	struct nvme_tcp_qpair tqpair = {};
631 	struct spdk_nvme_tcp_stat stats = {};
632 	struct nvme_tcp_req tcp_req = {};
633 	struct nvme_tcp_pdu pdu = {};
634 	struct nvme_request req = {};
635 	char iov_base0[4096];
636 	char iov_base1[4096];
637 	uint32_t plen;
638 	uint8_t pdo;
639 
640 	memset(iov_base0, 0xFF, 4096);
641 	memset(iov_base1, 0xFF, 4096);
642 	tcp_req.req = &req;
643 	tcp_req.pdu = &pdu;
644 	TAILQ_INIT(&tqpair.send_queue);
645 	tqpair.stats = &stats;
646 
647 	tcp_req.iov[0].iov_base = (void *)iov_base0;
648 	tcp_req.iov[0].iov_len = 4096;
649 	tcp_req.iov[1].iov_base = (void *)iov_base1;
650 	tcp_req.iov[1].iov_len = 4096;
651 	tcp_req.iovcnt = 2;
652 	tcp_req.req->payload_size = 8192;
653 	tcp_req.in_capsule_data = true;
654 	tqpair.cpda = NVME_TCP_HPDA_DEFAULT;
655 
656 	/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
657 	tqpair.flags.host_hdgst_enable = 1;
658 	tqpair.flags.host_ddgst_enable = 1;
659 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd) +
660 		     SPDK_NVME_TCP_DIGEST_LEN;
661 	plen += tcp_req.req->payload_size;
662 	plen += SPDK_NVME_TCP_DIGEST_LEN;
663 
664 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
665 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
666 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
667 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
668 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
669 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
670 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
671 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
672 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
673 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
674 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
675 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
676 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
677 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
678 
679 	/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
680 	memset(&pdu, 0, sizeof(pdu));
681 	tqpair.flags.host_hdgst_enable = 0;
682 	tqpair.flags.host_ddgst_enable = 0;
683 
684 	pdo = plen = sizeof(struct spdk_nvme_tcp_cmd);
685 	plen += tcp_req.req->payload_size;
686 
687 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
688 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
689 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags == 0);
690 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
691 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
692 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
693 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
694 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
695 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
696 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
697 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
698 
699 	/* Test case 3: padding available. Expect: PASS */
700 	memset(&pdu, 0, sizeof(pdu));
701 	tqpair.flags.host_hdgst_enable = 1;
702 	tqpair.flags.host_ddgst_enable = 1;
703 	tqpair.cpda = SPDK_NVME_TCP_CPDA_MAX;
704 
705 	pdo = plen = (SPDK_NVME_TCP_CPDA_MAX + 1) << 2;
706 	plen += tcp_req.req->payload_size;
707 	plen += SPDK_NVME_TCP_DIGEST_LEN;
708 
709 	nvme_tcp_qpair_capsule_cmd_send(&tqpair, &tcp_req);
710 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
711 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
712 		  & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
713 	CU_ASSERT(pdu.hdr.capsule_cmd.common.flags
714 		  & SPDK_NVME_TCP_CH_FLAGS_DDGSTF);
715 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdu_type ==
716 		  SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
717 	CU_ASSERT(pdu.hdr.capsule_cmd.common.pdo == pdo);
718 	CU_ASSERT(pdu.hdr.capsule_cmd.common.plen == plen);
719 	CU_ASSERT(pdu.data_iov[0].iov_base == tcp_req.iov[0].iov_base);
720 	CU_ASSERT(pdu.data_iov[0].iov_len == tcp_req.iov[0].iov_len);
721 	CU_ASSERT(pdu.data_iov[1].iov_base == tcp_req.iov[1].iov_base);
722 	CU_ASSERT(pdu.data_iov[1].iov_len == tcp_req.iov[1].iov_len);
723 }
724 
725 /* Just define, nothing to do */
726 static void
727 ut_nvme_tcp_qpair_xfer_complete_cb(void *cb_arg)
728 {
729 	return;
730 }
731 
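/* nvme_tcp_qpair_write_pdu() computes the header and data digests in place when the qpair has
 * them enabled, builds pdu->iov and pdu->sock_req for the socket write, records the completion
 * callback in the PDU, and queues the PDU on tqpair->send_queue. */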
732 static void
733 test_nvme_tcp_qpair_write_pdu(void)
734 {
735 	struct nvme_tcp_qpair tqpair = {};
736 	struct spdk_nvme_tcp_stat stats = {};
737 	struct nvme_request req = {};
738 	struct nvme_tcp_req treq = { .req = &req };
739 	struct nvme_tcp_pdu pdu = { .req = &treq };
740 	void *cb_arg = (void *)0xDEADBEEF;
741 	char iov_base0[4096];
742 	char iov_base1[4096];
743 
744 	memset(iov_base0, 0xFF, 4096);
745 	memset(iov_base1, 0xFF, 4096);
746 	pdu.data_len = 4096 * 2;
747 	pdu.padding_len = 0;
748 	pdu.data_iov[0].iov_base = (void *)iov_base0;
749 	pdu.data_iov[0].iov_len = 4096;
750 	pdu.data_iov[1].iov_base = (void *)iov_base1;
751 	pdu.data_iov[1].iov_len = 4096;
752 	pdu.data_iovcnt = 2;
753 	TAILQ_INIT(&tqpair.send_queue);
754 
755 	/* Test case 1: host hdgst and ddgst enabled. Expect: PASS */
756 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
757 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
758 
759 	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
760 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
761 	pdu.hdr.common.plen = pdu.hdr.common.hlen +
762 			      SPDK_NVME_TCP_DIGEST_LEN * 2;
763 	pdu.hdr.common.plen += pdu.data_len;
764 	tqpair.flags.host_hdgst_enable = 1;
765 	tqpair.flags.host_ddgst_enable = 1;
766 	tqpair.stats = &stats;
767 
768 	nvme_tcp_qpair_write_pdu(&tqpair,
769 				 &pdu,
770 				 ut_nvme_tcp_qpair_xfer_complete_cb,
771 				 cb_arg);
772 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
773 	/* Check the crc data of header digest filled into raw */
774 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen]);
775 	CU_ASSERT(pdu.data_digest[0]);
776 	CU_ASSERT(pdu.sock_req.iovcnt == 4);
777 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
778 	CU_ASSERT(pdu.iov[0].iov_len == (sizeof(struct spdk_nvme_tcp_cmd) +
779 					 SPDK_NVME_TCP_DIGEST_LEN));
780 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
781 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
782 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
783 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
784 	CU_ASSERT(pdu.iov[3].iov_base == &pdu.data_digest);
785 	CU_ASSERT(pdu.iov[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
786 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
787 	CU_ASSERT(pdu.cb_arg == cb_arg);
788 	CU_ASSERT(pdu.qpair == &tqpair);
789 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
790 
791 	/* Test case 2: host hdgst and ddgst disabled. Expect: PASS */
792 	memset(pdu.hdr.raw, 0, SPDK_NVME_TCP_TERM_REQ_PDU_MAX_SIZE);
793 	memset(pdu.data_digest, 0, SPDK_NVME_TCP_DIGEST_LEN);
794 
795 	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
796 	pdu.hdr.common.plen = pdu.hdr.common.hlen + pdu.data_len;
797 	tqpair.flags.host_hdgst_enable = 0;
798 	tqpair.flags.host_ddgst_enable = 0;
799 
800 	nvme_tcp_qpair_write_pdu(&tqpair,
801 				 &pdu,
802 				 ut_nvme_tcp_qpair_xfer_complete_cb,
803 				 cb_arg);
804 	TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
805 	CU_ASSERT(pdu.hdr.raw[pdu.hdr.common.hlen] == 0);
806 	CU_ASSERT(pdu.data_digest[0] == 0);
807 	CU_ASSERT(pdu.sock_req.iovcnt == 3);
808 	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
809 	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd));
810 	CU_ASSERT(pdu.iov[1].iov_base == pdu.data_iov[0].iov_base);
811 	CU_ASSERT(pdu.iov[1].iov_len == pdu.data_iov[0].iov_len);
812 	CU_ASSERT(pdu.iov[2].iov_base == pdu.data_iov[1].iov_base);
813 	CU_ASSERT(pdu.iov[2].iov_len == pdu.data_iov[1].iov_len);
814 	CU_ASSERT(pdu.cb_fn == ut_nvme_tcp_qpair_xfer_complete_cb);
815 	CU_ASSERT(pdu.cb_arg == cb_arg);
816 	CU_ASSERT(pdu.qpair == &tqpair);
817 	CU_ASSERT(pdu.sock_req.cb_arg == (void *)&pdu);
818 }
819 
820 static void
821 test_nvme_tcp_qpair_set_recv_state(void)
822 {
823 	struct nvme_tcp_qpair tqpair = {};
824 
825 	/* case 1: The state to be set is the same as tqpair's current recv state */
826 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
827 	nvme_tcp_qpair_set_recv_state(&tqpair, NVME_TCP_PDU_RECV_STATE_ERROR);
828 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_ERROR);
829 
830 	/* case 2: A different state is set accordingly */
831 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY;
832 	nvme_tcp_qpair_set_recv_state(&tqpair, 0xff);
833 	CU_ASSERT(tqpair.recv_state == 0xff);
834 }
835 
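/* nvme_tcp_alloc_reqs() allocates num_entries requests and num_entries + 1 send PDUs: one PDU
 * per request plus a final one reserved as the qpair's own send_pdu. */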
836 static void
837 test_nvme_tcp_alloc_reqs(void)
838 {
839 	struct nvme_tcp_qpair tqpair = {};
840 	int rc = 0;
841 
842 	/* case 1: single entry. Expect: PASS */
843 	tqpair.num_entries = 1;
844 	rc = nvme_tcp_alloc_reqs(&tqpair);
845 	CU_ASSERT(rc == 0);
846 	CU_ASSERT(tqpair.tcp_reqs[0].cid == 0);
847 	CU_ASSERT(tqpair.tcp_reqs[0].tqpair == &tqpair);
848 	CU_ASSERT(tqpair.tcp_reqs[0].pdu == &tqpair.send_pdus[0]);
849 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
850 	free(tqpair.tcp_reqs);
851 	spdk_free(tqpair.send_pdus);
852 
853 	/* case 2: multiple entries. Expect: PASS */
854 	tqpair.num_entries = 5;
855 	rc = nvme_tcp_alloc_reqs(&tqpair);
856 	CU_ASSERT(rc == 0);
857 	for (int i = 0; i < tqpair.num_entries; i++) {
858 		CU_ASSERT(tqpair.tcp_reqs[i].cid == i);
859 		CU_ASSERT(tqpair.tcp_reqs[i].tqpair == &tqpair);
860 		CU_ASSERT(tqpair.tcp_reqs[i].pdu == &tqpair.send_pdus[i]);
861 	}
862 	CU_ASSERT(tqpair.send_pdu == &tqpair.send_pdus[tqpair.num_entries]);
863 
864 	/* case 3: nvme_tcp_free_reqs() releases the requests and PDUs. Expect: PASS */
865 	nvme_tcp_free_reqs(&tqpair);
866 	CU_ASSERT(tqpair.tcp_reqs == NULL);
867 	CU_ASSERT(tqpair.send_pdus == NULL);
868 }
869 
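/* nvme_tcp_qpair_send_h2c_term_req() quiesces the receive path and builds a term req PDU that
 * carries at most SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE bytes of the offending PDU header
 * after the term req header. */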
870 static void
871 test_nvme_tcp_qpair_send_h2c_term_req(void)
872 {
873 	struct nvme_tcp_qpair tqpair = {};
874 	struct spdk_nvme_tcp_stat stats = {};
875 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {}, send_pdu = {};
876 	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
877 	uint32_t error_offset = 1;
878 
879 	tqpair.send_pdu = &send_pdu;
880 	tqpair.recv_pdu = &recv_pdu;
881 	tqpair.stats = &stats;
882 	TAILQ_INIT(&tqpair.send_queue);
883 	/* case1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
884 	pdu.hdr.common.hlen = 64;
885 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
886 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
887 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
888 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
889 		  pdu.hdr.common.hlen);
890 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
891 
892 	/* case2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
893 	pdu.hdr.common.hlen = 255;
894 	nvme_tcp_qpair_send_h2c_term_req(&tqpair, &pdu, fes, error_offset);
895 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
896 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
897 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == (unsigned)
898 		  tqpair.send_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
899 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
900 }
901 
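/* nvme_tcp_pdu_ch_handle() validates the common PDU header. Any violation quiesces the qpair and
 * queues an H2C termination request whose fei records the offset of the offending field in the
 * common header (2 for hlen, 4 for plen); a valid header advances to AWAIT_PDU_PSH. */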
902 static void
903 test_nvme_tcp_pdu_ch_handle(void)
904 {
905 	struct nvme_tcp_qpair tqpair = {};
906 	struct spdk_nvme_tcp_stat stats = {};
907 	struct nvme_tcp_pdu send_pdu = {}, recv_pdu = {};
908 
909 	tqpair.send_pdu = &send_pdu;
910 	tqpair.recv_pdu = &recv_pdu;
911 	tqpair.stats = &stats;
912 	TAILQ_INIT(&tqpair.send_queue);
913 	/* case 1: Already received IC_RESP PDU. Expect: fail */
914 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
915 	tqpair.state = NVME_TCP_QPAIR_STATE_INITIALIZING;
916 	nvme_tcp_pdu_ch_handle(&tqpair);
917 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
918 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
919 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
920 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
921 
922 	/* case 2: Expected PDU header length and received are different. Expect: fail */
923 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
924 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
925 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
926 	tqpair.recv_pdu->hdr.common.hlen = 0;
927 	nvme_tcp_pdu_ch_handle(&tqpair);
928 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
929 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
930 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
931 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
932 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 2);
933 
934 	/* case 3: The TCP/IP tqpair connection is not negotiated. Expect: fail */
935 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
936 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
937 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
938 	tqpair.recv_pdu->hdr.common.hlen = 0;
939 	nvme_tcp_pdu_ch_handle(&tqpair);
940 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
941 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
942 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
943 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen);
944 
945 	/* case 4: Unexpected PDU type. Expect: fail */
946 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
947 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
948 	tqpair.recv_pdu->hdr.common.plen = 0;
949 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
950 	nvme_tcp_pdu_ch_handle(&tqpair);
951 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
952 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
953 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
954 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
955 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
956 
957 	/* case 5: plen error. Expect: fail */
958 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
959 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
960 	tqpair.recv_pdu->hdr.common.plen = 0;
961 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
962 	nvme_tcp_pdu_ch_handle(&tqpair);
963 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
964 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
965 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
966 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
967 		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
968 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
969 
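	/* The same plen validation is repeated below for CAPSULE_RESP, C2H_DATA, C2H_TERM_REQ and
	 * R2T PDUs; each one is expected to quiesce the qpair and report fei offset 4 (plen). */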
970 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
971 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
972 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
973 	tqpair.recv_pdu->hdr.common.plen = 0;
974 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_rsp);
975 	nvme_tcp_pdu_ch_handle(&tqpair);
976 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
977 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
978 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
979 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
980 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
981 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
982 
983 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
984 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
985 	tqpair.recv_pdu->hdr.common.plen = 0;
986 	tqpair.recv_pdu->hdr.common.pdo = 64;
987 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_c2h_data_hdr);
988 	nvme_tcp_pdu_ch_handle(&tqpair);
989 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
990 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
991 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
992 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
993 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
994 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
995 
996 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
997 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
998 	tqpair.recv_pdu->hdr.common.plen = 0;
999 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
1000 	nvme_tcp_pdu_ch_handle(&tqpair);
1001 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1002 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1003 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1004 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1005 		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1006 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1007 
1008 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_R2T;
1009 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1010 	tqpair.recv_pdu->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1011 	tqpair.recv_pdu->hdr.common.plen = 0;
1012 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_r2t_hdr);
1013 	nvme_tcp_pdu_ch_handle(&tqpair);
1014 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1015 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ);
1016 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1017 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.common.plen == tqpair.send_pdu->hdr.term_req.common.hlen +
1018 		  (unsigned)sizeof(struct spdk_nvme_tcp_r2t_hdr));
1019 	CU_ASSERT(tqpair.send_pdu->hdr.term_req.fei[0] == 4);
1020 
1021 	/* case 6: Valid IC_RESP common header. Expect: PASS */
1022 	tqpair.recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1023 	tqpair.state = NVME_TCP_QPAIR_STATE_INVALID;
1024 	tqpair.recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1025 	tqpair.recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1026 	nvme_tcp_pdu_ch_handle(&tqpair);
1027 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1028 	CU_ASSERT(tqpair.recv_pdu->psh_len == tqpair.recv_pdu->hdr.common.hlen - sizeof(
1029 			  struct spdk_nvme_tcp_common_pdu_hdr));
1030 }
1031 
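/* Mocked spdk_sock_connect_ext(): verifies the address, port and socket options that
 * nvme_tcp_qpair_connect_sock() derives from the transport ID, then returns a fake socket. */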
1032 DEFINE_RETURN_MOCK(spdk_sock_connect_ext, struct spdk_sock *);
1033 struct spdk_sock *
1034 spdk_sock_connect_ext(const char *ip, int port,
1035 		      const char *_impl_name, struct spdk_sock_opts *opts)
1036 {
1037 	HANDLE_RETURN_MOCK(spdk_sock_connect_ext);
1038 	CU_ASSERT(port == 23);
1039 	CU_ASSERT(opts->opts_size == sizeof(*opts));
1040 	CU_ASSERT(opts->priority == 1);
1041 	CU_ASSERT(opts->zcopy == true);
1042 	CU_ASSERT(!strcmp(ip, "192.168.1.78"));
1043 	return (struct spdk_sock *)0xDDADBEEF;
1044 }
1045 
1046 static void
1047 test_nvme_tcp_qpair_connect_sock(void)
1048 {
1049 	struct nvme_tcp_ctrlr tctrlr = {};
1050 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1051 	struct nvme_tcp_qpair tqpair = {};
1052 	int rc;
1053 
1054 	tqpair.qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1055 	tqpair.qpair.id = 1;
1056 	tqpair.qpair.poll_group = (void *)0xDEADBEEF;
1057 	ctrlr->trid.priority = 1;
1058 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1059 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1060 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1061 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1062 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1063 
1064 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1065 	CU_ASSERT(rc == 0);
1066 
1067 	/* Unsupported family of the transport address */
1068 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IB;
1069 
1070 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1071 	SPDK_CU_ASSERT_FATAL(rc == -1);
1072 
1073 	/* Invalid dst_port, INT_MAX is 2147483647 */
1074 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1075 	memcpy(ctrlr->trid.trsvcid, "2147483647", sizeof("2147483647"));
1076 
1077 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1078 	SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
1079 
1080 	/* Parse invalid address */
1081 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1082 	memcpy(ctrlr->trid.traddr, "192.168.1.256", sizeof("192.168.1.256"));
1083 
1084 	rc = nvme_tcp_qpair_connect_sock(ctrlr, &tqpair.qpair);
1085 	SPDK_CU_ASSERT_FATAL(rc != 0);
1086 }
1087 
1088 static void
1089 test_nvme_tcp_qpair_icreq_send(void)
1090 {
1091 	struct nvme_tcp_qpair tqpair = {};
1092 	struct spdk_nvme_tcp_stat stats = {};
1093 	struct spdk_nvme_ctrlr ctrlr = {};
1094 	struct nvme_tcp_pdu pdu = {};
1095 	struct nvme_tcp_poll_group poll_group = {};
1096 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1097 	int rc;
1098 
1099 	tqpair.send_pdu = &pdu;
1100 	tqpair.qpair.ctrlr = &ctrlr;
1101 	tqpair.qpair.poll_group = &poll_group.group;
1102 	tqpair.stats = &stats;
1103 	ic_req = &pdu.hdr.ic_req;
1104 
1105 	tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
1106 	tqpair.qpair.ctrlr->opts.header_digest = true;
1107 	tqpair.qpair.ctrlr->opts.data_digest = true;
1108 	TAILQ_INIT(&tqpair.send_queue);
1109 
1110 	rc = nvme_tcp_qpair_icreq_send(&tqpair);
1111 	CU_ASSERT(rc == 0);
1112 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1113 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1114 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1115 	CU_ASSERT(ic_req->pfv == 0);
1116 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1117 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1118 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1119 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1120 }
1121 
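/* nvme_tcp_c2h_data_payload_handle() synthesizes a completion (filling cid and sqid in the cached
 * cpl) when the C2H data PDU carries the SUCCESS flag, instead of waiting for a CAPSULE_RESP;
 * without that flag the request stays outstanding. A C2H term req payload quiesces the qpair. */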
1122 static void
1123 test_nvme_tcp_c2h_payload_handle(void)
1124 {
1125 	struct nvme_tcp_qpair tqpair = {};
1126 	struct spdk_nvme_tcp_stat stats = {};
1127 	struct nvme_tcp_pdu pdu = {};
1128 	struct nvme_tcp_req tcp_req = {};
1129 	struct nvme_request	req = {};
1130 	struct nvme_tcp_pdu recv_pdu = {};
1131 	uint32_t reaped = 1;
1132 
1133 	tcp_req.req = &req;
1134 	tcp_req.req->qpair = &tqpair.qpair;
1135 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1136 	tcp_req.tqpair = &tqpair;
1137 	tcp_req.cid = 1;
1138 	tqpair.stats = &stats;
1139 
1140 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1141 
1142 	pdu.req = &tcp_req;
1143 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1144 					SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1145 	pdu.data_len = 1024;
1146 
1147 	tqpair.qpair.id = 1;
1148 	tqpair.recv_pdu = &recv_pdu;
1149 
1150 	/* case 1: nvme_tcp_c2h_data_payload_handle: tcp_req->datao != tcp_req->req->payload_size */
1151 	tcp_req.datao = 1024;
1152 	tcp_req.req->payload_size = 2048;
1153 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1154 	tcp_req.ordering.bits.send_ack = 1;
1155 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1156 	tcp_req.ordering.bits.data_recv = 0;
1157 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1158 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1159 	tqpair.qpair.num_outstanding_reqs = 1;
1160 
1161 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1162 
1163 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1164 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1165 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1166 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1167 	CU_ASSERT(reaped == 2);
1168 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1169 
1170 	/* case 2: nvme_tcp_c2h_data_payload_handle: tcp_req->datao == tcp_req->req->payload_size */
1171 	tcp_req.datao = 1024;
1172 	tcp_req.req->payload_size = 1024;
1173 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1174 	tcp_req.ordering.bits.send_ack = 1;
1175 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1176 	tcp_req.ordering.bits.data_recv = 0;
1177 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1178 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1179 	tqpair.qpair.num_outstanding_reqs = 1;
1180 
1181 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1182 
1183 	CU_ASSERT(tcp_req.rsp.status.p == 1);
1184 	CU_ASSERT(tcp_req.rsp.cid == tcp_req.cid);
1185 	CU_ASSERT(tcp_req.rsp.sqid == tqpair.qpair.id);
1186 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1187 	CU_ASSERT(reaped == 3);
1188 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1189 
1190 	/* case 3: nvme_tcp_c2h_data_payload_handle: flag does not have SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS */
1191 	pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1192 	tcp_req.datao = 1024;
1193 	tcp_req.req->payload_size = 1024;
1194 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1195 	tcp_req.ordering.bits.send_ack = 1;
1196 	memset(&tcp_req.rsp, 0, sizeof(tcp_req.rsp));
1197 	tcp_req.ordering.bits.data_recv = 0;
1198 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
1199 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1200 	tqpair.qpair.num_outstanding_reqs = 1;
1201 
1202 	nvme_tcp_c2h_data_payload_handle(&tqpair, &pdu, &reaped);
1203 
1204 	CU_ASSERT(reaped == 3);
1205 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1206 
1207 	/* case 4: nvme_tcp_c2h_term_req_payload_handle: recv_state is NVME_TCP_PDU_RECV_STATE_ERROR */
1208 	pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1209 	nvme_tcp_c2h_term_req_payload_handle(&tqpair, &pdu);
1210 
1211 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1212 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 1);
1213 }
1214 
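/* nvme_tcp_icresp_handle() validates the IC_RESP fields (pfv, maxh2cdata, cpda), records the
 * negotiated values and digest flags in the qpair, and only advances to FABRIC_CONNECT_SEND once
 * the icreq send has been acked; otherwise the qpair parks in INITIALIZING. */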
1215 static void
1216 test_nvme_tcp_icresp_handle(void)
1217 {
1218 	struct nvme_tcp_qpair tqpair = {};
1219 	struct spdk_nvme_tcp_stat stats = {};
1220 	struct nvme_tcp_pdu pdu = {};
1221 	struct nvme_tcp_pdu send_pdu = {};
1222 	struct nvme_tcp_pdu recv_pdu = {};
1223 
1224 	tqpair.send_pdu = &send_pdu;
1225 	tqpair.recv_pdu = &recv_pdu;
1226 	tqpair.stats = &stats;
1227 	TAILQ_INIT(&tqpair.send_queue);
1228 
1229 	/* case 1: Received ICResp PFV is not the expected value. */
1230 	pdu.hdr.ic_resp.pfv = 1;
1231 
1232 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1233 
1234 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1235 
1236 	/* case 2: Received ICResp maxh2cdata is not an accepted value. */
1237 	pdu.hdr.ic_resp.pfv = 0;
1238 	pdu.hdr.ic_resp.maxh2cdata = 2048;
1239 
1240 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1241 
1242 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1243 
1244 	/* case 3: Received ICResp cpda is out of range. */
1245 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1246 	pdu.hdr.ic_resp.cpda = 64;
1247 
1248 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1249 
1250 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1251 
1252 	/* case 4: icreq ack has not been received yet. */
1253 	pdu.hdr.ic_resp.maxh2cdata = NVME_TCP_PDU_H2C_MIN_DATA_SIZE;
1254 	pdu.hdr.ic_resp.cpda = 30;
1255 	pdu.hdr.ic_resp.dgst.bits.hdgst_enable = true;
1256 	pdu.hdr.ic_resp.dgst.bits.ddgst_enable = true;
1257 	tqpair.flags.icreq_send_ack = 0;
1258 
1259 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1260 
1261 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1262 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_INITIALIZING);
1263 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1264 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1265 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1266 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1267 
1268 	/* case 5: Expect: PASS. */
1269 	tqpair.flags.icreq_send_ack = 1;
1270 
1271 	nvme_tcp_icresp_handle(&tqpair, &pdu);
1272 
1273 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1274 	CU_ASSERT(tqpair.state == NVME_TCP_QPAIR_STATE_FABRIC_CONNECT_SEND);
1275 	CU_ASSERT(tqpair.maxh2cdata == pdu.hdr.ic_resp.maxh2cdata);
1276 	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_resp.cpda);
1277 	CU_ASSERT(tqpair.flags.host_hdgst_enable == pdu.hdr.ic_resp.dgst.bits.hdgst_enable);
1278 	CU_ASSERT(tqpair.flags.host_ddgst_enable == pdu.hdr.ic_resp.dgst.bits.ddgst_enable);
1279 }
1280 
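/* nvme_tcp_pdu_payload_handle() dispatches on the received PDU type once the payload is in:
 * C2H_DATA completes the request and returns the qpair to AWAIT_PDU_READY, while a C2H_TERM_REQ
 * quiesces the qpair. */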
1281 static void
1282 test_nvme_tcp_pdu_payload_handle(void)
1283 {
1284 	struct nvme_tcp_qpair	tqpair = {};
1285 	struct spdk_nvme_tcp_stat	stats = {};
1286 	struct nvme_tcp_pdu	recv_pdu = {};
1287 	struct nvme_tcp_req	tcp_req = {};
1288 	struct nvme_request	req = {};
1289 	uint32_t		reaped = 0;
1290 
1291 	tqpair.recv_pdu = &recv_pdu;
1292 	tcp_req.tqpair = &tqpair;
1293 	tcp_req.req = &req;
1294 	tcp_req.req->qpair = &tqpair.qpair;
1295 	tqpair.stats = &stats;
1296 
1297 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1298 	tqpair.qpair.id = 1;
1299 	recv_pdu.ddgst_enable = false;
1300 	recv_pdu.req = &tcp_req;
1301 	recv_pdu.hdr.c2h_data.common.flags = SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS |
1302 					     SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU;
1303 	recv_pdu.data_len = 1024;
1304 	tcp_req.ordering.bits.data_recv = 0;
1305 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1306 	tcp_req.cid = 1;
1307 	TAILQ_INIT(&tcp_req.tqpair->outstanding_reqs);
1308 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1309 	tqpair.qpair.num_outstanding_reqs = 1;
1310 
1311 	/* C2H_DATA */
1312 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_DATA;
1313 	tcp_req.datao = 1024;
1314 	tcp_req.req->payload_size = 2048;
1315 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1316 	tcp_req.ordering.bits.send_ack = 1;
1317 
1318 	recv_pdu.req = &tcp_req;
1319 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1320 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1321 	CU_ASSERT(tcp_req.rsp.status.p == 0);
1322 	CU_ASSERT(tcp_req.rsp.cid == 1);
1323 	CU_ASSERT(tcp_req.rsp.sqid == 1);
1324 	CU_ASSERT(tcp_req.ordering.bits.data_recv == 1);
1325 	CU_ASSERT(reaped == 1);
1326 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1327 
1328 	/* TermResp */
1329 	recv_pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ;
1330 	recv_pdu.hdr.term_req.fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
1331 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD;
1332 
1333 	recv_pdu.req = &tcp_req;
1334 	nvme_tcp_pdu_payload_handle(&tqpair, &reaped);
1335 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1336 }
1337 
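/* nvme_tcp_capsule_resp_hdr_handle() looks up the outstanding request by the CQE's cid and
 * completes it; a cid that matches no request quiesces the qpair. */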
1338 static void
1339 test_nvme_tcp_capsule_resp_hdr_handle(void)
1340 {
1341 	struct nvme_tcp_qpair	tqpair = {};
1342 	struct spdk_nvme_ctrlr	ctrlr = {};
1343 	struct spdk_nvme_tcp_stat	stats = {};
1344 	struct nvme_request	req = {};
1345 	struct spdk_nvme_cpl	rccqe_tgt = {};
1346 	struct nvme_tcp_req	*tcp_req = NULL;
1347 	uint32_t		reaped = 0;
1348 	int			rc;
1349 
1350 	/* Initialize requests and pdus */
1351 	tqpair.num_entries = 1;
1352 	tqpair.stats = &stats;
1353 	req.qpair = &tqpair.qpair;
1354 	req.qpair->ctrlr = &ctrlr;
1355 	req.payload = NVME_PAYLOAD_CONTIG(NULL, NULL);
1356 
1357 	rc = nvme_tcp_alloc_reqs(&tqpair);
1358 	SPDK_CU_ASSERT_FATAL(rc == 0);
1359 	tcp_req = nvme_tcp_req_get(&tqpair);
1360 	SPDK_CU_ASSERT_FATAL(tcp_req != NULL);
1361 	rc = nvme_tcp_req_init(&tqpair, &req, tcp_req);
1362 	SPDK_CU_ASSERT_FATAL(rc == 0);
1363 	tcp_req->ordering.bits.send_ack = 1;
1364 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1365 	/* tqpair.recv_pdu will be reset after handling */
1366 	memset(&rccqe_tgt, 0xff, sizeof(rccqe_tgt));
1367 	rccqe_tgt.cid = 0;
1368 	memcpy(&tqpair.recv_pdu->hdr.capsule_resp.rccqe, &rccqe_tgt, sizeof(rccqe_tgt));
1369 	tqpair.qpair.num_outstanding_reqs = 1;
1370 
1371 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1372 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1373 	CU_ASSERT(!memcmp(&tcp_req->rsp, &rccqe_tgt, sizeof(rccqe_tgt)));
1374 	CU_ASSERT(tcp_req->ordering.bits.data_recv == 1);
1375 	CU_ASSERT(reaped == 1);
1376 	CU_ASSERT(TAILQ_EMPTY(&tcp_req->tqpair->outstanding_reqs));
1377 	CU_ASSERT(tqpair.qpair.num_outstanding_reqs == 0);
1378 
1379 	/* Failed to get the tcp request (no matching cid), expect failure */
1380 	reaped = 0;
1381 	tqpair.recv_pdu->hdr.capsule_resp.rccqe.cid = 1;
1382 	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1383 
1384 	nvme_tcp_capsule_resp_hdr_handle(&tqpair, tqpair.recv_pdu, &reaped);
1385 	CU_ASSERT(reaped == 0);
1386 	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1387 	nvme_tcp_free_reqs(&tqpair);
1388 }
1389 
1390 static void
1391 test_nvme_tcp_ctrlr_connect_qpair(void)
1392 {
1393 	struct spdk_nvme_ctrlr ctrlr = {};
1394 	struct spdk_nvme_qpair *qpair;
1395 	struct nvme_tcp_qpair *tqpair;
1396 	struct nvme_tcp_pdu pdu = {};
1397 	struct nvme_tcp_pdu recv_pdu = {};
1398 	struct spdk_nvme_tcp_ic_req *ic_req = NULL;
1399 	int rc;
1400 
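	/* Prepare a connecting qpair whose receive PDU already holds a valid icresp. */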
1401 	tqpair = calloc(1, sizeof(*tqpair));
1402 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1403 	tqpair->recv_pdu = &recv_pdu;
1404 	qpair = &tqpair->qpair;
1405 	tqpair->sock = (struct spdk_sock *)0xDEADBEEF;
1406 	tqpair->send_pdu = &pdu;
1407 	tqpair->qpair.ctrlr = &ctrlr;
1408 	tqpair->qpair.state = NVME_QPAIR_CONNECTING;
1409 	tqpair->num_entries = 128;
1410 	ic_req = &pdu.hdr.ic_req;
1411 
1412 	tqpair->recv_pdu->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1413 	tqpair->recv_pdu->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_resp);
1414 	tqpair->recv_pdu->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_resp);
1415 	tqpair->recv_pdu->ch_valid_bytes = sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1416 	tqpair->recv_pdu->psh_valid_bytes = tqpair->recv_pdu->hdr.common.hlen -
1417 					    sizeof(struct spdk_nvme_tcp_common_pdu_hdr) - 1;
1418 	tqpair->recv_pdu->hdr.ic_resp.maxh2cdata = 4096;
1419 	tqpair->recv_pdu->hdr.ic_resp.cpda = 1;
1420 	tqpair->flags.icreq_send_ack = 1;
1421 	tqpair->qpair.ctrlr->opts.header_digest = true;
1422 	tqpair->qpair.ctrlr->opts.data_digest = true;
1423 	TAILQ_INIT(&tqpair->send_queue);
1424 
1425 	rc = nvme_tcp_ctrlr_connect_qpair(&ctrlr, qpair);
1426 	CU_ASSERT(rc == 0);
1427 
1428 	/* skip NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY state */
1429 	/* assume already received the icresp */
1430 	tqpair->recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1431 
1432 	while (nvme_qpair_get_state(qpair) == NVME_QPAIR_CONNECTING) {
1433 		rc = nvme_tcp_qpair_process_completions(qpair, 0);
1434 		CU_ASSERT(rc >= 0);
1435 	}
1436 
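	/* After the connect loop, verify the negotiated parameters and the contents of the icreq
	 * that was sent. */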
1437 	CU_ASSERT(tqpair->maxr2t == NVME_TCP_MAX_R2T_DEFAULT);
1438 	CU_ASSERT(tqpair->state == NVME_TCP_QPAIR_STATE_RUNNING);
1439 	CU_ASSERT(tqpair->recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH);
1440 	CU_ASSERT(ic_req->common.hlen == sizeof(*ic_req));
1441 	CU_ASSERT(ic_req->common.plen == sizeof(*ic_req));
1442 	CU_ASSERT(ic_req->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_REQ);
1443 	CU_ASSERT(ic_req->pfv == 0);
1444 	CU_ASSERT(ic_req->maxr2t == NVME_TCP_MAX_R2T_DEFAULT - 1);
1445 	CU_ASSERT(ic_req->hpda == NVME_TCP_HPDA_DEFAULT);
1446 	CU_ASSERT(ic_req->dgst.bits.hdgst_enable == true);
1447 	CU_ASSERT(ic_req->dgst.bits.ddgst_enable == true);
1448 
1449 	nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1450 }
1451 
1452 static void
1453 ut_disconnect_qpair_req_cb(void *ctx, const struct spdk_nvme_cpl *cpl)
1454 {
1455 	CU_ASSERT_EQUAL(cpl->status.sc, SPDK_NVME_SC_ABORTED_SQ_DELETION);
1456 	CU_ASSERT_EQUAL(cpl->status.sct, SPDK_NVME_SCT_GENERIC);
1457 }
1458 
1459 static void
1460 ut_disconnect_qpair_poll_group_cb(struct spdk_nvme_qpair *qpair, void *ctx)
1461 {
1462 	int *disconnected = ctx;
1463 
1464 	(*disconnected)++;
1465 }
1466 
1467 static void
1468 test_nvme_tcp_ctrlr_disconnect_qpair(void)
1469 {
1470 	struct spdk_nvme_ctrlr ctrlr = {};
1471 	struct spdk_nvme_qpair *qpair;
1472 	struct nvme_tcp_pdu pdu = {}, recv_pdu = {};
1473 	struct nvme_tcp_qpair tqpair = {
1474 		.qpair = {
1475 			.trtype = SPDK_NVME_TRANSPORT_TCP,
1476 			.ctrlr = &ctrlr,
1477 			.async = true,
1478 		},
1479 		.recv_pdu = &recv_pdu,
1480 	};
1481 	struct spdk_nvme_poll_group group = {};
1482 	struct nvme_tcp_poll_group tgroup = { .group.group = &group };
1483 	struct nvme_request req = { .qpair = &tqpair.qpair, .cb_fn = ut_disconnect_qpair_req_cb };
1484 	struct nvme_tcp_req treq = { .req = &req, .tqpair = &tqpair };
1485 	int rc, disconnected;
1486 
1487 	qpair = &tqpair.qpair;
1488 	qpair->poll_group = &tgroup.group;
1489 	tqpair.sock = (struct spdk_sock *)0xDEADBEEF;
1490 	tqpair.needs_poll = true;
1491 	TAILQ_INIT(&tgroup.needs_poll);
1492 	STAILQ_INIT(&tgroup.group.disconnected_qpairs);
1493 	TAILQ_INIT(&tqpair.send_queue);
1494 	TAILQ_INIT(&tqpair.free_reqs);
1495 	TAILQ_INIT(&tqpair.outstanding_reqs);
1496 	TAILQ_INSERT_TAIL(&tgroup.needs_poll, &tqpair, link);
1497 	TAILQ_INSERT_TAIL(&tqpair.send_queue, &pdu, tailq);
1498 
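	/* Plain disconnect: needs_poll is cleared, the socket is closed, and queued PDUs are dropped. */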
1499 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1500 
1501 	CU_ASSERT(tqpair.needs_poll == false);
1502 	CU_ASSERT(tqpair.sock == NULL);
1503 	CU_ASSERT(TAILQ_EMPTY(&tqpair.send_queue) == true);
1504 
1505 	/* Check that outstanding requests are aborted */
1506 	treq.state = NVME_TCP_REQ_ACTIVE;
1507 	qpair->num_outstanding_reqs = 1;
1508 	qpair->state = NVME_QPAIR_DISCONNECTING;
1509 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1510 
1511 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1512 
1513 	CU_ASSERT(TAILQ_EMPTY(&tqpair.outstanding_reqs));
1514 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 0);
1515 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.free_reqs));
1516 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1517 
1518 	/* Check that a request with an accel operation in progress won't be aborted until that
1519 	 * operation is completed */
1520 	treq.state = NVME_TCP_REQ_ACTIVE;
1521 	treq.ordering.bits.in_progress_accel = 1;
1522 	tqpair.async_complete = 0;
1523 	qpair->poll_group = NULL;
1524 	qpair->num_outstanding_reqs = 1;
1525 	qpair->state = NVME_QPAIR_DISCONNECTING;
1526 	TAILQ_REMOVE(&tqpair.free_reqs, &treq, link);
1527 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1528 
1529 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1530 
1531 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1532 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1533 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1534 
1535 	/* Check that a qpair will be transitioned to a DISCONNECTED state only once the accel
1536 	 * operation is completed */
1537 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1538 	CU_ASSERT_EQUAL(rc, 0);
1539 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1540 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1541 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1542 
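	/* Completing the accel operation lets the next poll finish the disconnect. */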
1543 	treq.ordering.bits.in_progress_accel = 0;
1544 	qpair->num_outstanding_reqs = 0;
1545 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1546 
1547 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1548 	CU_ASSERT_EQUAL(rc, -ENXIO);
1549 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1550 
1551 	/* Check the same scenario but this time with spdk_sock_flush() returning errors */
1552 	treq.state = NVME_TCP_REQ_ACTIVE;
1553 	treq.ordering.bits.in_progress_accel = 1;
1554 	qpair->num_outstanding_reqs = 1;
1555 	qpair->state = NVME_QPAIR_DISCONNECTING;
1556 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1557 
1558 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1559 
1560 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1561 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1562 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1563 
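	/* Even if spdk_sock_flush() fails, the qpair should still reach the disconnected state. */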
1564 	MOCK_SET(spdk_sock_flush, -ENOTCONN);
1565 	treq.ordering.bits.in_progress_accel = 0;
1566 	qpair->num_outstanding_reqs = 0;
1567 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1568 
1569 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1570 	CU_ASSERT_EQUAL(rc, 0);
1571 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1572 	rc = nvme_tcp_qpair_process_completions(qpair, 0);
1573 	CU_ASSERT_EQUAL(rc, -ENXIO);
1574 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1575 	MOCK_CLEAR(spdk_sock_flush);
1576 
1577 	/* Now check the same scenario, but with a qpair that's part of a poll group */
1578 	disconnected = 0;
1579 	group.ctx = &disconnected;
1580 	treq.state = NVME_TCP_REQ_ACTIVE;
1581 	treq.ordering.bits.in_progress_accel = 1;
1582 	qpair->poll_group = &tgroup.group;
1583 	qpair->num_outstanding_reqs = 1;
1584 	qpair->state = NVME_QPAIR_DISCONNECTING;
1585 	STAILQ_INSERT_TAIL(&tgroup.group.disconnected_qpairs, qpair, poll_group_stailq);
1586 	TAILQ_INSERT_TAIL(&tqpair.outstanding_reqs, &treq, link);
1587 
1588 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1589 						ut_disconnect_qpair_poll_group_cb);
1590 	/* As long as there's an outstanding request, disconnect_cb shouldn't be executed */
1591 	CU_ASSERT_EQUAL(disconnected, 0);
1592 	CU_ASSERT_EQUAL(qpair->num_outstanding_reqs, 1);
1593 	CU_ASSERT_EQUAL(&treq, TAILQ_FIRST(&tqpair.outstanding_reqs));
1594 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTING);
1595 
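	/* Finish the accel operation; the next poll group pass should execute disconnect_cb. */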
1596 	treq.ordering.bits.in_progress_accel = 0;
1597 	qpair->num_outstanding_reqs = 0;
1598 	TAILQ_REMOVE(&tqpair.outstanding_reqs, &treq, link);
1599 
1600 	nvme_tcp_poll_group_process_completions(&tgroup.group, 0,
1601 						ut_disconnect_qpair_poll_group_cb);
1602 	CU_ASSERT_EQUAL(disconnected, 1);
1603 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1604 
1605 	/* Check that a non-async qpair is marked as disconnected immediately */
1606 	qpair->poll_group = NULL;
1607 	qpair->state = NVME_QPAIR_DISCONNECTING;
1608 	qpair->async = false;
1609 
1610 	nvme_tcp_ctrlr_disconnect_qpair(&ctrlr, qpair);
1611 
1612 	CU_ASSERT_EQUAL(qpair->state, NVME_QPAIR_DISCONNECTED);
1613 }
1614 
1615 static void
1616 test_nvme_tcp_ctrlr_create_io_qpair(void)
1617 {
1618 	struct spdk_nvme_qpair *qpair = NULL;
1619 	struct nvme_tcp_ctrlr tctrlr = {};
1620 	struct spdk_nvme_ctrlr *ctrlr = &tctrlr.ctrlr;
1621 	uint16_t qid = 1;
1622 	struct spdk_nvme_io_qpair_opts opts = {
1623 		.io_queue_size = 2,
1624 		.qprio = SPDK_NVME_QPRIO_URGENT,
1625 		.io_queue_requests = 1,
1626 	};
1627 	struct nvme_tcp_qpair *tqpair;
1628 
1629 	ctrlr->trid.priority = 1;
1630 	ctrlr->trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1631 	memcpy(ctrlr->trid.traddr, "192.168.1.78", sizeof("192.168.1.78"));
1632 	memcpy(ctrlr->trid.trsvcid, "23", sizeof("23"));
1633 	memcpy(ctrlr->opts.src_addr, "192.168.1.77", sizeof("192.168.1.77"));
1634 	memcpy(ctrlr->opts.src_svcid, "23", sizeof("23"));
1635 
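	/* io_queue_size of 2 should yield a qpair with a single usable entry. */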
1636 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1637 	tqpair = nvme_tcp_qpair(qpair);
1638 
1639 	CU_ASSERT(qpair != NULL);
1640 	CU_ASSERT(qpair->id == 1);
1641 	CU_ASSERT(qpair->ctrlr == ctrlr);
1642 	CU_ASSERT(qpair->qprio == SPDK_NVME_QPRIO_URGENT);
1643 	CU_ASSERT(qpair->trtype == SPDK_NVME_TRANSPORT_TCP);
1644 	CU_ASSERT(qpair->poll_group == (void *)0xDEADBEEF);
1645 	CU_ASSERT(tqpair->num_entries == 1);
1646 
1647 	free(tqpair->tcp_reqs);
1648 	spdk_free(tqpair->send_pdus);
1649 	free(tqpair);
1650 
1651 	/* Max queue size shall pass */
1652 	opts.io_queue_size = 0xffff;
1653 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1654 	tqpair = nvme_tcp_qpair(qpair);
1655 
1656 	CU_ASSERT(qpair != NULL);
1657 	CU_ASSERT(tqpair->num_entries == 0xfffe);
1658 
1659 	free(tqpair->tcp_reqs);
1660 	spdk_free(tqpair->send_pdus);
1661 	free(tqpair);
1662 
1663 	/* Queue size 0 shall fail */
1664 	opts.io_queue_size = 0;
1665 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1666 	CU_ASSERT(qpair == NULL);
1667 
1668 	/* Queue size 1 shall fail */
1669 	opts.io_queue_size = 1;
1670 	qpair = nvme_tcp_ctrlr_create_io_qpair(ctrlr, qid, &opts);
1671 	CU_ASSERT(qpair == NULL);
1672 }
1673 
1674 static void
1675 test_nvme_tcp_ctrlr_delete_io_qpair(void)
1676 {
1677 	struct spdk_nvme_ctrlr	ctrlr = {};
1678 	struct spdk_nvme_qpair *qpair;
1679 	struct nvme_tcp_qpair *tqpair;
1680 	struct nvme_tcp_req tcp_req = {};
1681 	struct nvme_request	req = {};
1682 	int rc;
1683 
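	/* Deleting the qpair should abort the outstanding request it still holds. */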
1684 	tqpair = calloc(1, sizeof(struct nvme_tcp_qpair));
1685 	tqpair->tcp_reqs = calloc(1, sizeof(struct nvme_tcp_req));
1686 	tqpair->send_pdus = calloc(1, sizeof(struct nvme_tcp_pdu));
1687 	tqpair->qpair.trtype = SPDK_NVME_TRANSPORT_TCP;
1688 	qpair = &tqpair->qpair;
1689 	qpair->ctrlr = &ctrlr;
1690 	tcp_req.req = &req;
1691 	tcp_req.req->qpair = &tqpair->qpair;
1692 	tcp_req.req->cb_fn = ut_nvme_complete_request;
1693 	tcp_req.tqpair = tqpair;
1694 	tcp_req.state = NVME_TCP_REQ_ACTIVE;
1695 	TAILQ_INIT(&tqpair->outstanding_reqs);
1696 	TAILQ_INSERT_TAIL(&tcp_req.tqpair->outstanding_reqs, &tcp_req, link);
1697 	qpair->num_outstanding_reqs = 1;
1698 
1699 	rc = nvme_tcp_ctrlr_delete_io_qpair(&ctrlr, qpair);
1700 
1701 	CU_ASSERT(rc == 0);
1702 }
1703 
1704 static void
1705 test_nvme_tcp_poll_group_get_stats(void)
1706 {
1707 	int rc = 0;
1708 	struct spdk_sock_group sgroup = {};
1709 	struct nvme_tcp_poll_group *pgroup = NULL;
1710 	struct spdk_nvme_transport_poll_group *tgroup = NULL;
1711 	struct spdk_nvme_transport_poll_group_stat *tgroup_stat = NULL;
1712 
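	/* Create a poll group backed by a mocked socket group so its stats can be queried. */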
1713 	MOCK_SET(spdk_sock_group_create, &sgroup);
1714 	tgroup = nvme_tcp_poll_group_create();
1715 	CU_ASSERT(tgroup != NULL);
1716 	pgroup = nvme_tcp_poll_group(tgroup);
1717 	CU_ASSERT(pgroup != NULL);
1718 
1719 	/* Invalid group pointer, expect failure with -EINVAL */
1720 	rc = nvme_tcp_poll_group_get_stats(NULL, &tgroup_stat);
1721 	CU_ASSERT(rc == -EINVAL);
1722 	CU_ASSERT(tgroup_stat == NULL);
1723 
1724 	/* Invalid stats pointer, expect failure with -EINVAL */
1725 	rc = nvme_tcp_poll_group_get_stats(tgroup, NULL);
1726 	CU_ASSERT(rc == -EINVAL);
1727 
1728 	/* Getting stats succeeds */
1729 	rc = nvme_tcp_poll_group_get_stats(tgroup, &tgroup_stat);
1730 	CU_ASSERT(rc == 0);
1731 	CU_ASSERT(tgroup_stat != NULL);
1732 	CU_ASSERT(tgroup_stat->trtype == SPDK_NVME_TRANSPORT_TCP);
1733 	CU_ASSERT(memcmp(&tgroup_stat->tcp, &pgroup->stats, sizeof(struct spdk_nvme_tcp_stat)) == 0);
1734 
1735 	nvme_tcp_poll_group_free_stats(tgroup, tgroup_stat);
1736 	rc = nvme_tcp_poll_group_destroy(tgroup);
1737 	CU_ASSERT(rc == 0);
1738 
1739 	MOCK_CLEAR(spdk_sock_group_create);
1740 }
1741 
1742 static void
1743 test_nvme_tcp_ctrlr_construct(void)
1744 {
1745 	struct nvme_tcp_qpair *tqpair = NULL;
1746 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1747 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1748 	struct spdk_nvme_transport_id trid = {
1749 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1750 		.priority = 1,
1751 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1752 		.traddr = "192.168.1.78",
1753 		.trsvcid = "23",
1754 	};
1755 	struct spdk_nvme_ctrlr_opts opts = {
1756 		.admin_queue_size = 2,
1757 		.src_addr = "192.168.1.77",
1758 		.src_svcid = "23",
1759 	};
1760 
1761 	/* Transmit ACK timeout exceeds the max, expected to pass with the value clamped to the max */
1762 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1763 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDEADBEEF);
1764 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1765 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1766 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1767 
1768 	CU_ASSERT(ctrlr != NULL);
1769 	CU_ASSERT(tctrlr != NULL);
1770 	CU_ASSERT(tqpair != NULL);
1771 	CU_ASSERT(ctrlr->opts.transport_ack_timeout == NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
1772 	CU_ASSERT(memcmp(&ctrlr->trid, &trid, sizeof(struct spdk_nvme_transport_id)) == 0);
1773 	CU_ASSERT(tqpair->num_entries == 1);
1774 	CU_ASSERT(TAILQ_EMPTY(&tqpair->send_queue));
1775 	CU_ASSERT(TAILQ_EMPTY(&tqpair->outstanding_reqs));
1776 	CU_ASSERT(!TAILQ_EMPTY(&tqpair->free_reqs));
1777 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs) == &tqpair->tcp_reqs[0]);
1778 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->cid == 0);
1779 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->tqpair == tqpair);
1780 	CU_ASSERT(TAILQ_FIRST(&tqpair->free_reqs)->pdu == &tqpair->send_pdus[0]);
1781 	CU_ASSERT(tqpair->send_pdu == &tqpair->send_pdus[1]);
1782 	CU_ASSERT(tqpair->recv_pdu == &tqpair->send_pdus[2]);
1783 
1784 	free(tqpair->tcp_reqs);
1785 	spdk_free(tqpair->send_pdus);
1786 	free(tqpair);
1787 	free(tctrlr);
1788 
1789 	/* Admin queue size below the required minimum, admin qpair creation is expected to fail */
1790 	opts.admin_queue_size = 1;
1791 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1792 	CU_ASSERT(ctrlr == NULL);
1793 
1794 	/* Unhandled ADRFAM, admin qpair creation is expected to fail */
1795 	opts.admin_queue_size = 2;
1796 	trid.adrfam = SPDK_NVMF_ADRFAM_INTRA_HOST;
1797 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1798 	CU_ASSERT(ctrlr == NULL);
1799 
1800 	/* Socket connection error, admin qpair creation is expected to fail */
1801 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
1802 	MOCK_SET(spdk_sock_connect_ext, NULL);
1803 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1804 	CU_ASSERT(ctrlr == NULL);
1805 
1806 	MOCK_CLEAR(spdk_sock_connect_ext);
1807 }
1808 
1809 static void
1810 test_nvme_tcp_qpair_submit_request(void)
1811 {
1812 	int rc = 0;
1813 	struct nvme_tcp_ctrlr *tctrlr = NULL;
1814 	struct nvme_tcp_qpair *tqpair = NULL;
1815 	struct spdk_nvme_ctrlr *ctrlr = NULL;
1816 	struct nvme_tcp_req *tcp_req = NULL;
1817 	struct nvme_request req = {};
1818 	struct nvme_tcp_ut_bdev_io bio = {};
1819 	struct spdk_nvme_tcp_stat stat = {};
1820 	struct spdk_nvme_transport_id trid = {
1821 		.trtype = SPDK_NVME_TRANSPORT_TCP,
1822 		.priority = 1,
1823 		.adrfam = SPDK_NVMF_ADRFAM_IPV4,
1824 		.traddr = "192.168.1.78",
1825 		.trsvcid = "23",
1826 	};
1827 	struct spdk_nvme_ctrlr_opts opts = {
1828 		.admin_queue_size = 2,
1829 		.src_addr = "192.168.1.77",
1830 		.src_svcid = "23",
1831 	};
1832 
1833 	/* Construct TCP Controller */
1834 	opts.transport_ack_timeout = NVME_TCP_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
1835 	MOCK_SET(spdk_sock_connect_ext, (struct spdk_sock *)0xDCADBEEF);
1836 
1837 	ctrlr = nvme_tcp_ctrlr_construct(&trid, &opts, NULL);
1838 	CU_ASSERT(ctrlr != NULL);
1839 	tctrlr = nvme_tcp_ctrlr(ctrlr);
1840 	tqpair = nvme_tcp_qpair(tctrlr->ctrlr.adminq);
1841 	tcp_req = TAILQ_FIRST(&tqpair->free_reqs);
1842 	CU_ASSERT(tctrlr != NULL);
1843 	CU_ASSERT(tqpair != NULL);
1844 	CU_ASSERT(tcp_req->pdu != NULL);
1845 	CU_ASSERT(tqpair->num_entries == 1);
1846 
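	/* Submit a host-to-controller command carrying a two-element SGL payload. */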
1847 	tqpair->stats = &stat;
1848 	req.qpair = &tqpair->qpair;
1849 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
1850 	req.payload = NVME_PAYLOAD_SGL(nvme_tcp_ut_reset_sgl, nvme_tcp_ut_next_sge, &bio, NULL);
1851 
1852 	/* Request construction fails because max_sges is too small for the payload */
1853 	req.qpair->ctrlr->max_sges = 1;
1854 	req.payload_size = 2048;
1855 	req.payload_offset = 0;
1856 	bio.iovpos = 0;
1857 	bio.iovs[0].iov_len = 1024;
1858 	bio.iovs[1].iov_len = 1024;
1859 	bio.iovs[0].iov_base = (void *)0xDEADBEEF;
1860 	bio.iovs[1].iov_base = (void *)0xDFADBEEF;
1861 
1862 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1863 	CU_ASSERT(rc == -1);
1864 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->free_reqs));
1865 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_FREE);
1866 
1867 	/* Multiple SGL entries, expected to pass */
1868 	req.qpair->ctrlr->max_sges = 2;
1869 
1870 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1871 	CU_ASSERT(rc == 0);
1872 	CU_ASSERT(tcp_req->state == NVME_TCP_REQ_ACTIVE);
1873 	CU_ASSERT(NULL == TAILQ_FIRST(&tqpair->free_reqs));
1874 	CU_ASSERT(tcp_req == TAILQ_FIRST(&tqpair->outstanding_reqs));
1875 	CU_ASSERT(tcp_req->expected_datao == 0);
1876 	CU_ASSERT(tcp_req->req == &req);
1877 	CU_ASSERT(tcp_req->r2tl_remain == 0);
1878 	CU_ASSERT(tcp_req->r2tl_remain_next == 0);
1879 	CU_ASSERT(tcp_req->active_r2ts == 0);
1880 	CU_ASSERT(tcp_req->iovcnt == 2);
1881 	CU_ASSERT(tcp_req->ordering.raw == 0);
1882 	CU_ASSERT(req.cmd.cid == tcp_req->cid);
1883 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
1884 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
1885 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
1886 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
1887 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
1888 	CU_ASSERT(tcp_req->in_capsule_data == true);
1889 	CU_ASSERT(tcp_req->iov[0].iov_len == bio.iovs[0].iov_len);
1890 	CU_ASSERT(tcp_req->iov[1].iov_len == bio.iovs[1].iov_len);
1891 	CU_ASSERT(tcp_req->iov[0].iov_base == bio.iovs[0].iov_base);
1892 	CU_ASSERT(tcp_req->iov[1].iov_base == bio.iovs[1].iov_base);
1893 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD);
1894 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF) == 0);
1895 	CU_ASSERT((tcp_req->pdu->hdr.capsule_cmd.common.flags & SPDK_NVME_TCP_CH_FLAGS_DDGSTF) == 0);
1896 	CU_ASSERT(tcp_req->datao == 0);
1897 	CU_ASSERT(tcp_req->pdu->data_len == req.payload_size);
1898 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.pdo == sizeof(struct spdk_nvme_tcp_cmd));
1899 	CU_ASSERT(tcp_req->pdu->hdr.capsule_cmd.common.plen == sizeof(struct spdk_nvme_tcp_cmd) +
1900 		  req.payload_size);
1901 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_base == (void *)0xDEADBEEF);
1902 	CU_ASSERT(tcp_req->pdu->data_iov[0].iov_len == 1024);
1903 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_base == (void *)0xDFADBEEF);
1904 	CU_ASSERT(tcp_req->pdu->data_iov[1].iov_len == 1024);
1905 	CU_ASSERT(tcp_req->pdu->data_iovcnt == 2);
1906 
1907 	/* Request resource limit reached, expected to return -EAGAIN */
1908 	memset(&req, 0x00, sizeof(struct nvme_request));
1909 	CU_ASSERT(tqpair->stats->queued_requests == 0);
1910 
1911 	rc = nvme_tcp_qpair_submit_request(tctrlr->ctrlr.adminq, &req);
1912 	CU_ASSERT(rc == -EAGAIN);
1913 	CU_ASSERT(tqpair->stats->queued_requests == 1);
1914 
1915 	MOCK_CLEAR(spdk_sock_connect_ext);
1916 	free(tqpair->tcp_reqs);
1917 	spdk_free(tqpair->send_pdus);
1918 	free(tqpair);
1919 	free(tctrlr);
1920 }
1921 
1922 int
1923 main(int argc, char **argv)
1924 {
1925 	CU_pSuite	suite = NULL;
1926 	unsigned int	num_failures;
1927 
1928 	CU_initialize_registry();
1929 
1930 	suite = CU_add_suite("nvme_tcp", NULL, NULL);
1931 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
1932 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
1933 	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
1934 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
1935 	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
1936 	CU_ADD_TEST(suite, test_nvme_tcp_req_complete_safe);
1937 	CU_ADD_TEST(suite, test_nvme_tcp_req_get);
1938 	CU_ADD_TEST(suite, test_nvme_tcp_req_init);
1939 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_capsule_cmd_send);
1940 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_write_pdu);
1941 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_set_recv_state);
1942 	CU_ADD_TEST(suite, test_nvme_tcp_alloc_reqs);
1943 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_send_h2c_term_req);
1944 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_ch_handle);
1945 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_connect_sock);
1946 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_icreq_send);
1947 	CU_ADD_TEST(suite, test_nvme_tcp_c2h_payload_handle);
1948 	CU_ADD_TEST(suite, test_nvme_tcp_icresp_handle);
1949 	CU_ADD_TEST(suite, test_nvme_tcp_pdu_payload_handle);
1950 	CU_ADD_TEST(suite, test_nvme_tcp_capsule_resp_hdr_handle);
1951 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_connect_qpair);
1952 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_disconnect_qpair);
1953 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_create_io_qpair);
1954 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_delete_io_qpair);
1955 	CU_ADD_TEST(suite, test_nvme_tcp_poll_group_get_stats);
1956 	CU_ADD_TEST(suite, test_nvme_tcp_ctrlr_construct);
1957 	CU_ADD_TEST(suite, test_nvme_tcp_qpair_submit_request);
1958 
1959 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1960 	CU_cleanup_registry();
1961 	return num_failures;
1962 }
1963