xref: /spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c (revision ddd4603ceb2154dd59f14c6f2851f5f8cd1711c4)
1  /*   SPDX-License-Identifier: BSD-3-Clause
2   *   Copyright (C) 2018 Intel Corporation.
3   *   All rights reserved.
4   *   Copyright (c) 2021, 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5   */
6  
7  #include "spdk/stdinc.h"
8  #include "spdk/nvmf_spec.h"
9  #include "spdk_internal/cunit.h"
10  #include "spdk/bdev_zone.h"
11  
12  #include "common/lib/test_env.c"
13  #include "common/lib/test_sock.c"
14  
15  #include "nvmf/ctrlr.c"
16  #include "nvmf/tcp.c"
17  #include "spdk/sock.h"
18  #include "spdk/hexlify.h"
19  
20  #define UT_IPV4_ADDR "192.168.0.1"
21  #define UT_PORT "4420"
22  #define UT_NVMF_ADRFAM_INVALID 0xf
23  #define UT_MAX_QUEUE_DEPTH 128
24  #define UT_MAX_QPAIRS_PER_CTRLR 128
25  #define UT_IN_CAPSULE_DATA_SIZE 1024
26  #define UT_MAX_IO_SIZE 4096
27  #define UT_IO_UNIT_SIZE 1024
28  #define UT_MAX_AQ_DEPTH 64
29  #define UT_SQ_HEAD_MAX 128
30  #define UT_NUM_SHARED_BUFFERS 128
31  
32  static void *g_accel_p = (void *)0xdeadbeaf;
33  
34  SPDK_LOG_REGISTER_COMPONENT(nvmf)
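/*
 * The DEFINE_STUB()/DEFINE_STUB_V() macros below come from the SPDK unit-test
 * mock framework (pulled in through spdk_internal/cunit.h and
 * common/lib/test_env.c).  They generate trivial mock implementations for the
 * symbols that nvmf/ctrlr.c and nvmf/tcp.c reference but that these tests do
 * not want to exercise.  As a rough, non-authoritative sketch, a call such as
 * DEFINE_STUB(fn, int, (void), 0) expands to something along the lines of:
 *
 *     bool ut_fn_mocked = true;
 *     int ut_fn = 0;
 *     int fn(void) { return MOCK_GET(fn); }
 *
 * so individual tests can override the returned value with MOCK_SET(fn, val)
 * and restore the default with MOCK_CLEAR()/MOCK_CLEAR_P(), as done further
 * down in this file for spdk_sock_group_create().
 */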
35  
36  DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
37  	    int,
38  	    (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
39  	    0);
40  DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair), 0);
41  
42  DEFINE_STUB(nvmf_subsystem_add_ctrlr,
43  	    int,
44  	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
45  	    0);
46  
47  DEFINE_STUB(nvmf_subsystem_get_ctrlr,
48  	    struct spdk_nvmf_ctrlr *,
49  	    (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
50  	    NULL);
51  
52  DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
53  	    struct spdk_nvmf_subsystem *,
54  	    (struct spdk_nvmf_tgt *tgt, const char *subnqn),
55  	    NULL);
56  
57  DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
58  	    bool,
59  	    (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
60  	    true);
61  
62  DEFINE_STUB(nvmf_subsystem_find_listener,
63  	    struct spdk_nvmf_subsystem_listener *,
64  	    (struct spdk_nvmf_subsystem *subsystem,
65  	     const struct spdk_nvme_transport_id *trid),
66  	    (void *)0x1);
67  
68  DEFINE_STUB(spdk_sock_get_numa_id, int32_t, (struct spdk_sock *sock), 0);
69  
70  DEFINE_STUB(spdk_nvmf_ns_find_host,
71  	    struct spdk_nvmf_host *,
72  	    (struct spdk_nvmf_ns *ns, const char *hostnqn),
73  	    NULL);
74  
75  DEFINE_STUB_V(nvmf_get_discovery_log_page,
76  	      (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
77  	       uint32_t iovcnt, uint64_t offset, uint32_t length, struct spdk_nvme_transport_id *cmd_src_trid));
78  
79  DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
80  	      (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));
81  
82  DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
83  	    struct spdk_nvmf_ns *,
84  	    (struct spdk_nvmf_subsystem *subsystem),
85  	    NULL);
86  
87  DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
88  	    struct spdk_nvmf_ns *,
89  	    (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
90  	    NULL);
91  DEFINE_STUB(nvmf_subsystem_zone_append_supported, bool,
92  	    (struct spdk_nvmf_subsystem *subsystem), false);
93  DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
94  	    bool,
95  	    (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
96  	    true);
97  
98  DEFINE_STUB(nvmf_ctrlr_dsm_supported,
99  	    bool,
100  	    (struct spdk_nvmf_ctrlr *ctrlr),
101  	    false);
102  
103  DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
104  	    bool,
105  	    (struct spdk_nvmf_ctrlr *ctrlr),
106  	    false);
107  
108  DEFINE_STUB(nvmf_ctrlr_copy_supported,
109  	    bool,
110  	    (struct spdk_nvmf_ctrlr *ctrlr),
111  	    false);
112  
113  DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
114  	    int,
115  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
116  	     struct spdk_nvmf_request *req),
117  	    0);
118  
119  DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
120  	    int,
121  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
122  	     struct spdk_nvmf_request *req),
123  	    0);
124  
125  DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
126  	    int,
127  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
128  	     struct spdk_nvmf_request *req),
129  	    0);
130  
131  DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
132  	    int,
133  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
134  	     struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
135  	    0);
136  
137  DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
138  	    int,
139  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
140  	     struct spdk_nvmf_request *req),
141  	    0);
142  
143  DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
144  	    int,
145  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
146  	     struct spdk_nvmf_request *req),
147  	    0);
148  
149  DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
150  	    int,
151  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
152  	     struct spdk_nvmf_request *req),
153  	    0);
154  
155  DEFINE_STUB(nvmf_bdev_ctrlr_copy_cmd,
156  	    int,
157  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
158  	     struct spdk_nvmf_request *req),
159  	    0);
160  
161  DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
162  	    int,
163  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
164  	     struct spdk_nvmf_request *req),
165  	    0);
166  
167  DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
168  	    int,
169  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
170  	     struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
171  	    0);
172  
173  DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
174  	    bool,
175  	    (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
176  	    false);
177  
178  DEFINE_STUB_V(nvmf_bdev_ctrlr_identify_iocs_nvm,
179  	      (struct spdk_nvmf_ns *ns, struct spdk_nvme_nvm_ns_data *nsdata_nvm));
180  
181  DEFINE_STUB(nvmf_transport_req_complete,
182  	    int,
183  	    (struct spdk_nvmf_request *req),
184  	    0);
185  
186  DEFINE_STUB(nvmf_bdev_zcopy_enabled,
187  	    bool,
188  	    (struct spdk_bdev *bdev),
189  	    false);
190  
191  DEFINE_STUB(nvmf_bdev_ctrlr_zcopy_start,
192  	    int,
193  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
194  	     struct spdk_nvmf_request *req),
195  	    0);
196  
197  DEFINE_STUB_V(nvmf_bdev_ctrlr_zcopy_end, (struct spdk_nvmf_request *req, bool commit));
198  
199  DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
200  	      (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
201  	       struct spdk_nvmf_transport *transport));
202  
203  DEFINE_STUB(spdk_sock_get_optimal_sock_group,
204  	    int,
205  	    (struct spdk_sock *sock, struct spdk_sock_group **group, struct spdk_sock_group *hint),
206  	    0);
207  
208  DEFINE_STUB(spdk_sock_group_get_ctx,
209  	    void *,
210  	    (struct spdk_sock_group *group),
211  	    NULL);
212  
213  DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
214  
215  DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
216  		enum spdk_nvme_transport_type trtype));
217  DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));
218  
219  DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
220  
221  DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
222  	      (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
223  
224  DEFINE_STUB_V(nvmf_qpair_set_state, (struct spdk_nvmf_qpair *q, enum spdk_nvmf_qpair_state s));
225  
226  DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
227  DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
228  
229  DEFINE_STUB(nvmf_transport_req_free,
230  	    int,
231  	    (struct spdk_nvmf_request *req),
232  	    0);
233  
234  DEFINE_STUB(accel_channel_create, int, (void *io_device, void *ctx_buf), 0);
235  DEFINE_STUB_V(accel_channel_destroy, (void *io_device, void *ctx_buf));
236  DEFINE_STUB(spdk_bdev_reset, int, (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
237  				   spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
238  DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
239  
240  DEFINE_STUB(spdk_bdev_get_max_active_zones, uint32_t,
241  	    (const struct spdk_bdev *bdev), 0);
242  DEFINE_STUB(spdk_bdev_get_max_open_zones, uint32_t,
243  	    (const struct spdk_bdev *bdev), 0);
244  DEFINE_STUB(spdk_bdev_is_zoned, bool, (const struct spdk_bdev *bdev), false);
245  DEFINE_STUB(spdk_bdev_get_zone_size, uint64_t, (const struct spdk_bdev *bdev), 0);
246  
247  DEFINE_STUB(spdk_nvme_ns_get_format_index, uint32_t,
248  	    (const struct spdk_nvme_ns_data *nsdata), 0);
249  
250  DEFINE_STUB(spdk_sock_get_impl_name, const char *, (struct spdk_sock *sock), "");
251  
252  DEFINE_STUB(spdk_sock_group_register_interrupt, int, (struct spdk_sock_group *group,
253  		uint32_t events, spdk_interrupt_fn fn, void *arg, const char *name), 0);
254  DEFINE_STUB_V(spdk_sock_group_unregister_interrupt, (struct spdk_sock_group *group));
255  
256  DEFINE_STUB(spdk_nvmf_subsystem_is_discovery, bool, (struct spdk_nvmf_subsystem *subsystem), false);
257  DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
258  	    (const struct spdk_nvmf_subsystem *subsystem), NULL);
259  DEFINE_STUB_V(spdk_keyring_put_key, (struct spdk_key *k));
260  
261  DEFINE_STUB(nvmf_ns_is_ptpl_capable, bool, (const struct spdk_nvmf_ns *ns), false);
262  DEFINE_STUB(nvmf_subsystem_host_auth_required, bool, (struct spdk_nvmf_subsystem *s, const char *n),
263  	    false);
264  DEFINE_STUB(nvmf_qpair_auth_init, int, (struct spdk_nvmf_qpair *q), 0);
265  DEFINE_STUB(nvmf_auth_request_exec, int, (struct spdk_nvmf_request *r),
266  	    SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
267  DEFINE_STUB(nvmf_request_get_buffers_abort, bool, (struct spdk_nvmf_request *r), false);
268  DEFINE_STUB(spdk_bdev_io_type_supported, bool,
269  	    (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);
270  struct spdk_io_channel *
271  spdk_accel_get_io_channel(void)
272  {
273  	return spdk_get_io_channel(g_accel_p);
274  }
275  
276  DEFINE_STUB(spdk_accel_submit_crc32cv,
277  	    int,
278  	    (struct spdk_io_channel *ch, uint32_t *dst, struct iovec *iovs,
279  	     uint32_t iovcnt, uint32_t seed, spdk_accel_completion_cb cb_fn, void *cb_arg),
280  	    0);
281  
282  DEFINE_STUB(spdk_nvmf_bdev_ctrlr_nvme_passthru_admin,
283  	    int,
284  	    (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
285  	     struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
286  	     spdk_nvmf_nvme_passthru_cmd_cb cb_fn),
287  	    0)
288  
289  struct spdk_key {
290  	const char *name;
291  	char data[4096];
292  	int len;
293  } g_ut_psk = {
294  	.name = "ut-key",
295  };
296  
297  struct spdk_key *
298  spdk_keyring_get_key(const char *name)
299  {
300  	if (strcmp(name, g_ut_psk.name) == 0) {
301  		return &g_ut_psk;
302  	}
303  
304  	return NULL;
305  }
306  
307  int
308  spdk_key_get_key(struct spdk_key *key, void *buf, int len)
309  {
310  	len = spdk_min(key->len, len);
311  
312  	memcpy(buf, key->data, len);
313  
314  	return len;
315  }
316  
317  const char *
318  spdk_key_get_name(struct spdk_key *k)
319  {
320  	return k->name;
321  }
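/*
 * Minimal fake keyring: the TLS tests fill in g_ut_psk.data/len and then look
 * the key up by name ("ut-key") through the overridden spdk_keyring_get_key()
 * and spdk_key_get_key() above.  A sketch of the intended use (mirroring
 * test_nvmf_tcp_tls_add_remove_credentials below):
 *
 *     snprintf(g_ut_psk.data, sizeof(g_ut_psk.data), "%s", psk);
 *     g_ut_psk.len = strlen(psk) + 1;
 *     struct spdk_key *key = spdk_keyring_get_key("ut-key");
 *     assert(key == &g_ut_psk);
 */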
322  
323  struct spdk_bdev {
324  	int ut_mock;
325  	uint64_t blockcnt;
326  };
327  
328  int
329  spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
330  			       const struct spdk_nvme_transport_id *trid2)
331  {
332  	return 0;
333  }
334  
335  const char *
336  spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
337  {
338  	switch (trtype) {
339  	case SPDK_NVME_TRANSPORT_PCIE:
340  		return "PCIe";
341  	case SPDK_NVME_TRANSPORT_RDMA:
342  		return "RDMA";
343  	case SPDK_NVME_TRANSPORT_FC:
344  		return "FC";
345  	default:
346  		return NULL;
347  	}
348  }
349  
350  int
351  spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
352  {
353  	int len, i;
354  
355  	if (trstring == NULL) {
356  		return -EINVAL;
357  	}
358  
359  	len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
360  	if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
361  		return -EINVAL;
362  	}
363  
364  	/* Set the official trstring to the uppercase version of the input. */
365  	for (i = 0; i < len; i++) {
366  		trid->trstring[i] = toupper(trstring[i]);
367  	}
368  	return 0;
369  }
370  
371  int
372  spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
373  			      struct spdk_nvmf_transport_poll_group *group,
374  			      struct spdk_nvmf_transport *transport,
375  			      uint32_t length)
376  {
377  	/* A length of one I/O unit or more will fail. */
378  	if (length >= transport->opts.io_unit_size) {
379  		return -EINVAL;
380  	}
381  
382  	req->iovcnt = 1;
383  	req->iov[0].iov_base = (void *)0xDEADBEEF;
384  
385  	return 0;
386  }
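/*
 * Note: this mock only succeeds for requests strictly smaller than one I/O
 * unit and always hands back a single fake iovec.  The in-capsule data test
 * below relies on both outcomes: a request of UT_IO_UNIT_SIZE is refused,
 * while a retry with UT_IO_UNIT_SIZE - 1 "gets" a buffer and can make
 * progress.
 */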
387  
388  
389  void
390  nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
391  			    bool dif_insert_or_strip)
392  {
393  	uint64_t num_blocks;
394  
395  	SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
396  	num_blocks = ns->bdev->blockcnt;
397  	nsdata->nsze = num_blocks;
398  	nsdata->ncap = num_blocks;
399  	nsdata->nuse = num_blocks;
400  	nsdata->nlbaf = 0;
401  	nsdata->flbas.format = 0;
402  	nsdata->flbas.msb_format = 0;
403  	nsdata->lbaf[0].lbads = spdk_u32log2(512);
404  }
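/*
 * Local override: identify data is synthesized directly from the fake
 * struct spdk_bdev's blockcnt with a single 512-byte LBA format, which is
 * all the TCP tests need.
 */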
405  
406  const char *
407  spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
408  {
409  	return subsystem->sn;
410  }
411  
412  const char *
413  spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
414  {
415  	return subsystem->mn;
416  }
417  
418  static void
419  test_nvmf_tcp_create(void)
420  {
421  	struct spdk_thread *thread;
422  	struct spdk_nvmf_transport *transport;
423  	struct spdk_nvmf_tcp_transport *ttransport;
424  	struct spdk_nvmf_transport_opts opts;
425  	struct spdk_sock_group grp = {};
426  
427  	thread = spdk_thread_create(NULL, NULL);
428  	SPDK_CU_ASSERT_FATAL(thread != NULL);
429  	spdk_set_thread(thread);
430  
431  	MOCK_SET(spdk_sock_group_create, &grp);
432  
433  	/* case 1 */
434  	memset(&opts, 0, sizeof(opts));
435  	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
436  	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
437  	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
438  	opts.max_io_size = UT_MAX_IO_SIZE;
439  	opts.io_unit_size = UT_IO_UNIT_SIZE;
440  	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
441  	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
442  	/* expect success */
443  	transport = nvmf_tcp_create(&opts);
444  	CU_ASSERT_PTR_NOT_NULL(transport);
445  	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
446  	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
447  	transport->opts = opts;
448  	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
449  	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
450  	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
451  	CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
452  	/* destroy transport */
453  	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);
454  
455  	/* case 2 */
456  	memset(&opts, 0, sizeof(opts));
457  	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
458  	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
459  	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
460  	opts.max_io_size = UT_MAX_IO_SIZE;
461  	opts.io_unit_size = UT_MAX_IO_SIZE + 1;
462  	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
463  	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
464  	/* expect success */
465  	transport = nvmf_tcp_create(&opts);
466  	CU_ASSERT_PTR_NOT_NULL(transport);
467  	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
468  	SPDK_CU_ASSERT_FATAL(ttransport != NULL);
469  	transport->opts = opts;
470  	CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
471  	CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
472  	CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
473  	CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
474  	/* destroy transport */
475  	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);
476  
477  	/* case 3 */
478  	memset(&opts, 0, sizeof(opts));
479  	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
480  	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
481  	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
482  	opts.max_io_size = UT_MAX_IO_SIZE;
483  	opts.io_unit_size = 16;
484  	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
485  	/* expect failure */
486  	transport = nvmf_tcp_create(&opts);
487  	CU_ASSERT_PTR_NULL(transport);
488  
489  	MOCK_CLEAR_P(spdk_sock_group_create);
490  
491  	spdk_thread_exit(thread);
492  	while (!spdk_thread_is_exited(thread)) {
493  		spdk_thread_poll(thread, 0, 0);
494  	}
495  	spdk_thread_destroy(thread);
496  }
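/*
 * Every test that touches the transport follows the same SPDK thread
 * lifecycle as test_nvmf_tcp_create() above: create and set an application
 * thread, run the code under test, then drain the thread before destroying
 * it.  A minimal sketch of that pattern (names taken from this file):
 *
 *     struct spdk_thread *thread = spdk_thread_create(NULL, NULL);
 *     spdk_set_thread(thread);
 *     ... exercise nvmf_tcp_create()/nvmf_tcp_destroy() ...
 *     spdk_thread_exit(thread);
 *     while (!spdk_thread_is_exited(thread)) {
 *             spdk_thread_poll(thread, 0, 0);
 *     }
 *     spdk_thread_destroy(thread);
 */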
497  
498  static void
499  test_nvmf_tcp_destroy(void)
500  {
501  	struct spdk_thread *thread;
502  	struct spdk_nvmf_transport *transport;
503  	struct spdk_nvmf_transport_opts opts;
504  	struct spdk_sock_group grp = {};
505  
506  	thread = spdk_thread_create(NULL, NULL);
507  	SPDK_CU_ASSERT_FATAL(thread != NULL);
508  	spdk_set_thread(thread);
509  
510  	/* case 1 */
511  	memset(&opts, 0, sizeof(opts));
512  	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
513  	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
514  	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
515  	opts.max_io_size = UT_MAX_IO_SIZE;
516  	opts.io_unit_size = UT_IO_UNIT_SIZE;
517  	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
518  	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
519  	MOCK_SET(spdk_sock_group_create, &grp);
520  	transport = nvmf_tcp_create(&opts);
521  	MOCK_CLEAR_P(spdk_sock_group_create);
522  	CU_ASSERT_PTR_NOT_NULL(transport);
523  	transport->opts = opts;
524  	/* destroy transport */
525  	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);
526  
527  	spdk_thread_exit(thread);
528  	while (!spdk_thread_is_exited(thread)) {
529  		spdk_thread_poll(thread, 0, 0);
530  	}
531  	spdk_thread_destroy(thread);
532  }
533  
534  static void
535  init_accel(void)
536  {
537  	spdk_io_device_register(g_accel_p, accel_channel_create, accel_channel_destroy,
538  				sizeof(int), "accel_p");
539  }
540  
541  static void
542  fini_accel(void)
543  {
544  	spdk_io_device_unregister(g_accel_p, NULL);
545  }
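/*
 * init_accel()/fini_accel() register g_accel_p as a dummy io_device so that
 * the spdk_accel_get_io_channel() override above can hand out a real
 * spdk_io_channel while a poll group is created and destroyed.
 */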
546  
547  static void
548  test_nvmf_tcp_poll_group_create(void)
549  {
550  	struct spdk_nvmf_transport *transport;
551  	struct spdk_nvmf_transport_poll_group *group;
552  	struct spdk_nvmf_tcp_poll_group *tgroup;
553  	struct spdk_thread *thread;
554  	struct spdk_nvmf_transport_opts opts;
555  	struct spdk_sock_group grp = {};
556  
557  	thread = spdk_thread_create(NULL, NULL);
558  	SPDK_CU_ASSERT_FATAL(thread != NULL);
559  	spdk_set_thread(thread);
560  
561  	init_accel();
562  
563  	memset(&opts, 0, sizeof(opts));
564  	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
565  	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
566  	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
567  	opts.max_io_size = UT_MAX_IO_SIZE;
568  	opts.io_unit_size = UT_IO_UNIT_SIZE;
569  	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
570  	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
571  	MOCK_SET(spdk_sock_group_create, &grp);
572  	transport = nvmf_tcp_create(&opts);
573  	MOCK_CLEAR_P(spdk_sock_group_create);
574  	CU_ASSERT_PTR_NOT_NULL(transport);
575  	transport->opts = opts;
576  	MOCK_SET(spdk_sock_group_create, &grp);
577  	group = nvmf_tcp_poll_group_create(transport, NULL);
578  	MOCK_CLEAR_P(spdk_sock_group_create);
579  	SPDK_CU_ASSERT_FATAL(group);
580  	if (opts.in_capsule_data_size < SPDK_NVME_TCP_IN_CAPSULE_DATA_MAX_SIZE) {
581  		tgroup = SPDK_CONTAINEROF(group, struct spdk_nvmf_tcp_poll_group, group);
582  		SPDK_CU_ASSERT_FATAL(tgroup->control_msg_list);
583  	}
584  	group->transport = transport;
585  	nvmf_tcp_poll_group_destroy(group);
586  	nvmf_tcp_destroy(transport, NULL, NULL);
587  
588  	fini_accel();
589  	spdk_thread_exit(thread);
590  	while (!spdk_thread_is_exited(thread)) {
591  		spdk_thread_poll(thread, 0, 0);
592  	}
593  	spdk_thread_destroy(thread);
594  }
595  
596  static void
597  test_nvmf_tcp_send_c2h_data(void)
598  {
599  	struct spdk_thread *thread;
600  	struct spdk_nvmf_tcp_transport ttransport = {};
601  	struct spdk_nvmf_tcp_qpair tqpair = {};
602  	struct spdk_nvmf_tcp_req tcp_req = {};
603  	struct nvme_tcp_pdu pdu = {};
604  	struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
605  
606  	ttransport.tcp_opts.c2h_success = true;
607  	thread = spdk_thread_create(NULL, NULL);
608  	SPDK_CU_ASSERT_FATAL(thread != NULL);
609  	spdk_set_thread(thread);
610  
611  	tcp_req.pdu = &pdu;
612  	tcp_req.req.length = 300;
613  	tcp_req.req.qpair = &tqpair.qpair;
614  
615  	tqpair.qpair.transport = &ttransport.transport;
616  
617  	/* Set qpair state to make unrelated operations NOP */
618  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
619  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
620  
621  	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
622  
623  	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
624  	tcp_req.req.iov[0].iov_len = 101;
625  	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
626  	tcp_req.req.iov[1].iov_len = 100;
627  	tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
628  	tcp_req.req.iov[2].iov_len = 99;
629  	tcp_req.req.iovcnt = 3;
630  	tcp_req.req.length = 300;
631  
632  	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
633  
634  	c2h_data = &pdu.hdr.c2h_data;
635  	CU_ASSERT(c2h_data->datao == 0);
636  	CU_ASSERT(c2h_data->datal == 300);
637  	CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
638  	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
639  	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS);
640  
641  	CU_ASSERT(pdu.data_iovcnt == 3);
642  	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
643  	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
644  	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
645  	CU_ASSERT(pdu.data_iov[1].iov_len == 100);
646  	CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
647  	CU_ASSERT(pdu.data_iov[2].iov_len == 99);
648  
649  	tcp_req.pdu_in_use = false;
650  	tcp_req.rsp.cdw0 = 1;
651  	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
652  
653  	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
654  	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);
655  
656  	ttransport.tcp_opts.c2h_success = false;
657  	tcp_req.pdu_in_use = false;
658  	tcp_req.rsp.cdw0 = 0;
659  	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
660  
661  	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
662  	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);
663  
664  	tcp_req.pdu_in_use = false;
665  	tcp_req.rsp.cdw0 = 1;
666  	nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
667  
668  	CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
669  	CU_ASSERT((c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_SUCCESS) == 0);
670  
671  	spdk_thread_exit(thread);
672  	while (!spdk_thread_is_exited(thread)) {
673  		spdk_thread_poll(thread, 0, 0);
674  	}
675  	spdk_thread_destroy(thread);
676  }
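/*
 * Summary of the cases above: with tcp_opts.c2h_success enabled and a zeroed
 * completion (cdw0 == 0), the C2H DATA PDU carries the SUCCESS flag and no
 * separate capsule response is needed.  If the completion has a non-zero
 * cdw0, or c2h_success is disabled, the SUCCESS flag is cleared and the
 * completion is expected to be delivered in a separate response capsule.
 */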
677  
678  #define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)
679  
680  static void
681  test_nvmf_tcp_h2c_data_hdr_handle(void)
682  {
683  	struct spdk_nvmf_tcp_transport ttransport = {};
684  	struct spdk_nvmf_tcp_qpair tqpair = {};
685  	struct nvme_tcp_pdu pdu = {};
686  	struct spdk_nvmf_tcp_req tcp_req = {};
687  	struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
688  
689  	/* Set qpair state to make unrelated operations NOP */
690  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
691  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
692  	tqpair.resource_count = 1;
693  	tqpair.reqs = &tcp_req;
694  
695  	tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
696  	tcp_req.req.iov[0].iov_len = 101;
697  	tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
698  	tcp_req.req.iov[1].iov_len = 99;
699  	tcp_req.req.iovcnt = 2;
700  	tcp_req.req.length = 200;
701  	tcp_req.state = TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;
702  
703  	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
704  	tcp_req.req.cmd->nvme_cmd.cid = 1;
705  	tcp_req.ttag = 1;
706  
707  	h2c_data = &pdu.hdr.h2c_data;
708  	h2c_data->cccid = 1;
709  	h2c_data->ttag = 1;
710  	h2c_data->datao = 0;
711  	h2c_data->datal = 200;
712  
713  	nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);
714  
715  	CU_ASSERT(pdu.data_iovcnt == 2);
716  	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
717  	CU_ASSERT(pdu.data_iov[0].iov_len == 101);
718  	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
719  	CU_ASSERT(pdu.data_iov[1].iov_len == 99);
720  }
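/*
 * The H2C DATA header handler is expected to match the incoming PDU to the
 * outstanding request via cccid/ttag and then point the PDU's data iovecs at
 * the request's receive buffers, which is what the asserts above verify.
 */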
721  
722  
723  static void
724  test_nvmf_tcp_in_capsule_data_handle(void)
725  {
726  	struct spdk_nvmf_tcp_transport ttransport = {};
727  	struct spdk_nvmf_transport_ops ops = {};
728  	struct spdk_nvmf_tcp_qpair tqpair = {};
729  	struct nvme_tcp_pdu *pdu, pdu_in_progress = {};
730  	union nvmf_c2h_msg rsp0 = {};
731  	union nvmf_c2h_msg rsp = {};
732  
733  	struct spdk_nvmf_tcp_req tcp_req2 = {};
734  	struct spdk_nvmf_tcp_req tcp_req1 = {};
735  
736  	struct spdk_nvme_tcp_cmd *capsule_data;
737  	struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
738  	struct spdk_nvme_sgl_descriptor *sgl;
739  
740  	struct spdk_nvmf_transport_poll_group *group;
741  	struct spdk_nvmf_tcp_poll_group tcp_group = {};
742  	struct spdk_sock_group grp = {};
743  
744  	tqpair.pdu_in_progress = &pdu_in_progress;
745  	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
746  	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
747  	ttransport.transport.ops = &ops;
748  	ops.req_get_buffers_done = nvmf_tcp_req_get_buffers_done;
749  
750  	tcp_group.sock_group = &grp;
751  	TAILQ_INIT(&tcp_group.qpairs);
752  	group = &tcp_group.group;
753  	group->transport = &ttransport.transport;
754  	tqpair.group = &tcp_group;
755  
756  	TAILQ_INIT(&tqpair.tcp_req_free_queue);
757  	TAILQ_INIT(&tqpair.tcp_req_working_queue);
758  
759  	TAILQ_INSERT_TAIL(&tqpair.tcp_req_free_queue, &tcp_req2, state_link);
760  	tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
761  	tqpair.qpair.transport = &ttransport.transport;
762  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
763  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
764  	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;
765  
766  	/* Initialize tcp_req2, the idle request already inserted into the tqpair's TCP_REQUEST_STATE_FREE queue */
767  	tcp_req2.req.qpair = &tqpair.qpair;
768  	tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
769  	tcp_req2.req.rsp = &rsp;
770  
771  	/* init tcp_req1 */
772  	tcp_req1.req.qpair = &tqpair.qpair;
773  	tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
774  	tcp_req1.req.rsp = &rsp0;
775  	tcp_req1.state = TCP_REQUEST_STATE_NEW;
776  	tcp_req1.req.data_from_pool = false;
777  
778  	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req1, state_link);
779  	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
780  
781  	/* Initialize the PDU so that it requires an SGL buffer */
782  	pdu = tqpair.pdu_in_progress;
783  	capsule_data = &pdu->hdr.capsule_cmd;
784  	nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
785  	sgl = &capsule_data->ccsqe.dptr.sgl1;
786  
787  	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
788  	capsule_data->common.hlen = sizeof(*capsule_data);
789  	capsule_data->common.plen = 1096;
790  	capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;
791  
792  	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
793  	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
794  	sgl->unkeyed.length = UT_IO_UNIT_SIZE;
795  
796  	nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
797  
798  	/* pretend that tcp_req1 is waiting in the iobuf waiting queue */
799  	nvmf_tcp_req_process(&ttransport, &tcp_req1);
800  	CU_ASSERT(tcp_req1.req.data_from_pool == false);
801  
802  	sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;
803  
804  	/* process tqpair capsule req. */
805  	nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, tqpair.pdu_in_progress);
806  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
807  	CU_ASSERT(tqpair.pdu_in_progress->req == (void *)&tcp_req2);
808  
809  	/* pretend that buffer for tcp_req1 becomes available */
810  	spdk_nvmf_request_get_buffers(&tcp_req1.req, group, &ttransport.transport, UT_IO_UNIT_SIZE - 1);
811  	/* trigger callback as nvmf_request_iobuf_get_cb would */
812  	ttransport.transport.ops->req_get_buffers_done(&tcp_req1.req);
813  	CU_ASSERT(tcp_req1.state == TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
814  }
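/*
 * This test walks the "no buffer available yet" path: tcp_req1 is parked
 * waiting for an iobuf, the next capsule header is still accepted and bound
 * to the idle tcp_req2, and once a buffer "arrives" the req_get_buffers_done
 * callback moves tcp_req1 on to TRANSFERRING_HOST_TO_CONTROLLER.
 */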
815  
816  static void
817  test_nvmf_tcp_qpair_init_mem_resource(void)
818  {
819  	int rc;
820  	struct spdk_nvmf_tcp_qpair *tqpair = NULL;
821  	struct spdk_nvmf_transport transport = {};
822  	struct spdk_thread *thread;
823  
824  	thread = spdk_thread_create(NULL, NULL);
825  	SPDK_CU_ASSERT_FATAL(thread != NULL);
826  	spdk_set_thread(thread);
827  
828  	tqpair = calloc(1, sizeof(*tqpair));
829  	tqpair->qpair.transport = &transport;
830  
831  	nvmf_tcp_opts_init(&transport.opts);
832  	CU_ASSERT(transport.opts.max_queue_depth == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
833  	CU_ASSERT(transport.opts.max_qpairs_per_ctrlr == SPDK_NVMF_TCP_DEFAULT_MAX_QPAIRS_PER_CTRLR);
834  	CU_ASSERT(transport.opts.in_capsule_data_size == SPDK_NVMF_TCP_DEFAULT_IN_CAPSULE_DATA_SIZE);
835  	CU_ASSERT(transport.opts.max_io_size ==	SPDK_NVMF_TCP_DEFAULT_MAX_IO_SIZE);
836  	CU_ASSERT(transport.opts.io_unit_size == SPDK_NVMF_TCP_DEFAULT_IO_UNIT_SIZE);
837  	CU_ASSERT(transport.opts.max_aq_depth == SPDK_NVMF_TCP_DEFAULT_MAX_ADMIN_QUEUE_DEPTH);
838  	CU_ASSERT(transport.opts.num_shared_buffers == SPDK_NVMF_TCP_DEFAULT_NUM_SHARED_BUFFERS);
839  	CU_ASSERT(transport.opts.buf_cache_size == SPDK_NVMF_TCP_DEFAULT_BUFFER_CACHE_SIZE);
840  	CU_ASSERT(transport.opts.dif_insert_or_strip ==	SPDK_NVMF_TCP_DEFAULT_DIF_INSERT_OR_STRIP);
841  	CU_ASSERT(transport.opts.abort_timeout_sec == SPDK_NVMF_TCP_DEFAULT_ABORT_TIMEOUT_SEC);
842  	CU_ASSERT(transport.opts.transport_specific == NULL);
843  
844  	rc = nvmf_tcp_qpair_init(&tqpair->qpair);
845  	CU_ASSERT(rc == 0);
846  	CU_ASSERT(tqpair->host_hdgst_enable == true);
847  	CU_ASSERT(tqpair->host_ddgst_enable == true);
848  
849  	rc = nvmf_tcp_qpair_init_mem_resource(tqpair);
850  	CU_ASSERT(rc == 0);
851  	CU_ASSERT(tqpair->resource_count == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
852  	CU_ASSERT(tqpair->reqs != NULL);
853  	CU_ASSERT(tqpair->bufs != NULL);
854  	CU_ASSERT(tqpair->pdus != NULL);
855  	/* Just check the first and last entries */
856  	CU_ASSERT(tqpair->reqs[0].ttag == 1);
857  	CU_ASSERT(tqpair->reqs[0].req.qpair == &tqpair->qpair);
858  	CU_ASSERT(tqpair->reqs[0].pdu == &tqpair->pdus[0]);
859  	CU_ASSERT(tqpair->reqs[0].pdu->qpair == &tqpair->qpair);
860  	CU_ASSERT(tqpair->reqs[0].buf == (void *)((uintptr_t)tqpair->bufs));
861  	CU_ASSERT(tqpair->reqs[0].req.rsp == (void *)&tqpair->reqs[0].rsp);
862  	CU_ASSERT(tqpair->reqs[0].req.cmd == (void *)&tqpair->reqs[0].cmd);
863  	CU_ASSERT(tqpair->reqs[0].state == TCP_REQUEST_STATE_FREE);
864  	CU_ASSERT(tqpair->reqs[127].ttag == 128);
865  	CU_ASSERT(tqpair->reqs[127].req.qpair == &tqpair->qpair);
866  	CU_ASSERT(tqpair->reqs[127].pdu == &tqpair->pdus[127]);
867  	CU_ASSERT(tqpair->reqs[127].pdu->qpair == &tqpair->qpair);
868  	CU_ASSERT(tqpair->reqs[127].buf == (void *)((uintptr_t)tqpair->bufs + 127 * 4096));
869  	CU_ASSERT(tqpair->reqs[127].req.rsp == (void *)&tqpair->reqs[127].rsp);
870  	CU_ASSERT(tqpair->reqs[127].req.cmd == (void *)&tqpair->reqs[127].cmd);
871  	CU_ASSERT(tqpair->reqs[127].state == TCP_REQUEST_STATE_FREE);
872  	CU_ASSERT(tqpair->state_cntr[TCP_REQUEST_STATE_FREE] == SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH);
873  	CU_ASSERT(tqpair->mgmt_pdu == &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH]);
874  	CU_ASSERT(tqpair->mgmt_pdu->qpair == tqpair);
875  	CU_ASSERT(tqpair->pdu_in_progress ==
876  		  &tqpair->pdus[2 * SPDK_NVMF_TCP_DEFAULT_MAX_IO_QUEUE_DEPTH - 1]);
877  	CU_ASSERT(tqpair->recv_buf_size == (4096 + sizeof(struct spdk_nvme_tcp_cmd) + 2 *
878  					    SPDK_NVME_TCP_DIGEST_LEN) * SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR);
879  
880  	/* Free all of tqpair resource */
881  	nvmf_tcp_qpair_destroy(tqpair);
882  
883  	spdk_thread_exit(thread);
884  	while (!spdk_thread_is_exited(thread)) {
885  		spdk_thread_poll(thread, 0, 0);
886  	}
887  	spdk_thread_destroy(thread);
888  }
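/*
 * The layout checked above: one nvme_tcp_pdu per request slot
 * (reqs[i].pdu == &pdus[i]), ttags starting at 1, one 4 KiB buffer per slot
 * carved out of tqpair->bufs, the management PDU and the in-progress PDU
 * placed past the per-request PDUs, and a receive buffer sized from the
 * in-capsule data size (4096 by default) plus the command header and two
 * digests, times SPDK_NVMF_TCP_RECV_BUF_SIZE_FACTOR.
 */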
889  
890  static void
891  test_nvmf_tcp_send_c2h_term_req(void)
892  {
893  	struct spdk_nvmf_tcp_qpair tqpair = {};
894  	struct nvme_tcp_pdu pdu = {}, mgmt_pdu = {}, pdu_in_progress = {};
895  	enum spdk_nvme_tcp_term_req_fes fes = SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
896  	uint32_t error_offset = 1;
897  
898  	mgmt_pdu.qpair = &tqpair;
899  	tqpair.mgmt_pdu = &mgmt_pdu;
900  	tqpair.pdu_in_progress = &pdu_in_progress;
901  	tqpair.tcp_pdu_working_count = 1;
902  
903  	/* case 1: hlen < SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == hlen */
904  	pdu.hdr.common.hlen = 64;
905  	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
906  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
907  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
908  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
909  		  pdu.hdr.common.hlen);
910  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
911  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
912  
913  	/* case 2: hlen > SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE, Expect: copy_len == SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE */
914  	pdu.hdr.common.hlen = 255;
915  	nvmf_tcp_send_c2h_term_req(&tqpair, &pdu, fes, error_offset);
916  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
917  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
918  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == (unsigned)
919  		  tqpair.mgmt_pdu->hdr.term_req.common.hlen + SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
920  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
921  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fes == SPDK_NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD);
922  }
923  
924  static void
925  test_nvmf_tcp_send_capsule_resp_pdu(void)
926  {
927  	struct spdk_nvmf_tcp_req tcp_req = {};
928  	struct spdk_nvmf_tcp_qpair tqpair = {};
929  	struct nvme_tcp_pdu pdu = {};
930  
931  	tcp_req.pdu_in_use = false;
932  	tcp_req.req.qpair = &tqpair.qpair;
933  	tcp_req.pdu = &pdu;
934  	tcp_req.req.rsp = (union nvmf_c2h_msg *)&tcp_req.rsp;
935  	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
936  	tqpair.host_hdgst_enable = true;
937  
938  	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
939  	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
940  	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp) +
941  		  SPDK_NVME_TCP_DIGEST_LEN);
942  	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
943  	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
944  			  sizeof(struct spdk_nvme_cpl)));
945  	CU_ASSERT(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF);
946  	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
947  	CU_ASSERT(pdu.cb_arg == &tcp_req);
948  	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
949  	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp) + SPDK_NVME_TCP_DIGEST_LEN);
950  
951  	/* hdgst disable */
952  	tqpair.host_hdgst_enable = false;
953  	tcp_req.pdu_in_use = false;
954  	memset(&pdu, 0, sizeof(pdu));
955  
956  	nvmf_tcp_send_capsule_resp_pdu(&tcp_req, &tqpair);
957  	CU_ASSERT(pdu.hdr.capsule_resp.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP);
958  	CU_ASSERT(pdu.hdr.capsule_resp.common.plen == sizeof(struct spdk_nvme_tcp_rsp));
959  	CU_ASSERT(pdu.hdr.capsule_resp.common.hlen == sizeof(struct spdk_nvme_tcp_rsp));
960  	CU_ASSERT(!memcmp(&pdu.hdr.capsule_resp.rccqe, &tcp_req.req.rsp->nvme_cpl,
961  			  sizeof(struct spdk_nvme_cpl)));
962  	CU_ASSERT(!(pdu.hdr.capsule_resp.common.flags & SPDK_NVME_TCP_CH_FLAGS_HDGSTF));
963  	CU_ASSERT(pdu.cb_fn == nvmf_tcp_request_free);
964  	CU_ASSERT(pdu.cb_arg == &tcp_req);
965  	CU_ASSERT(pdu.iov[0].iov_base == &pdu.hdr.raw);
966  	CU_ASSERT(pdu.iov[0].iov_len == sizeof(struct spdk_nvme_tcp_rsp));
967  }
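/*
 * The two passes above differ only in host_hdgst_enable: with header digest
 * on, plen and iov[0].iov_len grow by SPDK_NVME_TCP_DIGEST_LEN and the
 * HDGSTF flag is set; with it off, the response is just the bare
 * spdk_nvme_tcp_rsp header.
 */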
968  
969  static void
970  test_nvmf_tcp_icreq_handle(void)
971  {
972  	struct spdk_nvmf_tcp_transport ttransport = {};
973  	struct spdk_nvmf_tcp_qpair tqpair = {};
974  	struct nvme_tcp_pdu pdu = {};
975  	struct nvme_tcp_pdu mgmt_pdu = {};
976  	struct nvme_tcp_pdu pdu_in_progress = {};
977  	struct spdk_nvme_tcp_ic_resp *ic_resp;
978  
979  	mgmt_pdu.qpair = &tqpair;
980  	tqpair.mgmt_pdu = &mgmt_pdu;
981  	tqpair.pdu_in_progress = &pdu_in_progress;
982  	tqpair.tcp_pdu_working_count = 1;
983  
984  	/* case 1: The received ICReq PFV differs from the expected value (0). */
985  	pdu.hdr.ic_req.pfv = 1;
986  
987  	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);
988  
989  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
990  
991  	/* case 2: The received ICReq HPDA is outside the expected range 0-31. */
992  	pdu.hdr.ic_req.hpda = SPDK_NVME_TCP_HPDA_MAX + 1;
993  
994  	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);
995  
996  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
997  
998  	/* case 3: Expect: PASS.  */
999  	ttransport.transport.opts.max_io_size = 32;
1000  	pdu.hdr.ic_req.pfv = 0;
1001  	tqpair.host_hdgst_enable = false;
1002  	tqpair.host_ddgst_enable = false;
1003  	tqpair.recv_buf_size = 64;
1004  	pdu.hdr.ic_req.hpda = 16;
1005  
1006  	nvmf_tcp_icreq_handle(&ttransport, &tqpair, &pdu);
1007  
1008  	ic_resp = &tqpair.mgmt_pdu->hdr.ic_resp;
1009  	CU_ASSERT(tqpair.recv_buf_size == MIN_SOCK_PIPE_SIZE);
1010  	CU_ASSERT(tqpair.cpda == pdu.hdr.ic_req.hpda);
1011  	CU_ASSERT(ic_resp->common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_IC_RESP);
1012  	CU_ASSERT(ic_resp->common.hlen == sizeof(struct spdk_nvme_tcp_ic_resp));
1013  	CU_ASSERT(ic_resp->common.plen ==  sizeof(struct spdk_nvme_tcp_ic_resp));
1014  	CU_ASSERT(ic_resp->pfv == 0);
1015  	CU_ASSERT(ic_resp->cpda == tqpair.cpda);
1016  	CU_ASSERT(ic_resp->maxh2cdata == ttransport.transport.opts.max_io_size);
1017  	CU_ASSERT(ic_resp->dgst.bits.hdgst_enable == 0);
1018  	CU_ASSERT(ic_resp->dgst.bits.ddgst_enable == 0);
1019  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1020  }
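/*
 * ICReq negotiation summary: a PFV other than 0 or an HPDA above
 * SPDK_NVME_TCP_HPDA_MAX quiesces the qpair, while a valid ICReq produces an
 * ICResp that mirrors the host's HPDA back as CPDA, advertises maxh2cdata
 * taken from the transport's max_io_size, and leaves the qpair waiting for
 * the next PDU.
 */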
1021  
1022  static void
1023  test_nvmf_tcp_check_xfer_type(void)
1024  {
1025  	const uint16_t cid = 0xAA;
1026  	struct spdk_nvmf_tcp_transport ttransport = {};
1027  	struct spdk_nvmf_tcp_qpair tqpair = {};
1028  	struct nvme_tcp_pdu pdu_in_progress = {};
1029  	union nvmf_c2h_msg rsp0 = {};
1030  
1031  	struct spdk_nvmf_tcp_req tcp_req = {};
1032  	struct nvme_tcp_pdu rsp_pdu = {};
1033  
1034  	struct spdk_nvme_tcp_cmd *capsule_data;
1035  	struct spdk_nvme_sgl_descriptor *sgl;
1036  
1037  	struct spdk_nvmf_transport_poll_group *group;
1038  	struct spdk_nvmf_tcp_poll_group tcp_group = {};
1039  	struct spdk_sock_group grp = {};
1040  
1041  	tqpair.pdu_in_progress = &pdu_in_progress;
1042  	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
1043  	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
1044  
1045  	tcp_group.sock_group = &grp;
1046  	TAILQ_INIT(&tcp_group.qpairs);
1047  	group = &tcp_group.group;
1048  	group->transport = &ttransport.transport;
1049  	tqpair.group = &tcp_group;
1050  
1051  	TAILQ_INIT(&tqpair.tcp_req_free_queue);
1052  	TAILQ_INIT(&tqpair.tcp_req_working_queue);
1053  
1054  	tqpair.qpair.transport = &ttransport.transport;
1055  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1056  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1057  	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;
1058  
1059  	/* init tcp_req */
1060  	tcp_req.req.qpair = &tqpair.qpair;
1061  	tcp_req.pdu = &rsp_pdu;
1062  	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
1063  	tcp_req.req.rsp = &rsp0;
1064  	tcp_req.state = TCP_REQUEST_STATE_NEW;
1065  
1066  	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
1067  	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
1068  
1069  	/* Initialize the PDU so that it requires an SGL buffer */
1070  	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
1071  	sgl = &capsule_data->ccsqe.dptr.sgl1;
1072  
1073  	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
1074  	capsule_data->common.hlen = sizeof(*capsule_data);
1075  	capsule_data->common.plen = 1096;
1076  	capsule_data->ccsqe.opc = 0x10 | SPDK_NVME_DATA_BIDIRECTIONAL;
1077  	/* Set a non-zero value to check that it gets copied to the response */
1078  	capsule_data->ccsqe.cid = cid;
1079  
1080  	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
1081  	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
1082  	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
1083  	sgl->unkeyed.length = UT_IO_UNIT_SIZE;
1084  
1085  	/* Process a command and ensure that it fails and the request is set up to return an error */
1086  	nvmf_tcp_req_process(&ttransport, &tcp_req);
1087  	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
1088  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_READY);
1089  	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.cid == cid);
1090  	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
1091  	CU_ASSERT(tcp_req.req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
1092  }
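/*
 * A command whose opcode claims bidirectional data transfer cannot be
 * satisfied by the target, so the request is completed in place with
 * SPDK_NVME_SC_INVALID_OPCODE and moves straight to the
 * controller-to-host-transfer state to return that error, with the original
 * CID copied into the completion.
 */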
1093  
1094  static void
1095  test_nvmf_tcp_invalid_sgl(void)
1096  {
1097  	const uint16_t cid = 0xAABB;
1098  	struct spdk_nvmf_tcp_transport ttransport = {};
1099  	struct spdk_nvmf_tcp_qpair tqpair = {};
1100  	struct nvme_tcp_pdu pdu_in_progress = {};
1101  	union nvmf_c2h_msg rsp0 = {};
1102  
1103  	struct spdk_nvmf_tcp_req tcp_req = {};
1104  	struct nvme_tcp_pdu rsp_pdu = {};
1105  	struct nvme_tcp_pdu mgmt_pdu = {};
1106  
1107  	struct spdk_nvme_tcp_cmd *capsule_data;
1108  	struct spdk_nvme_sgl_descriptor *sgl;
1109  
1110  	struct spdk_nvmf_transport_poll_group *group;
1111  	struct spdk_nvmf_tcp_poll_group tcp_group = {};
1112  	struct spdk_sock_group grp = {};
1113  
1114  	tqpair.pdu_in_progress = &pdu_in_progress;
1115  	ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
1116  	ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
1117  
1118  	tcp_group.sock_group = &grp;
1119  	TAILQ_INIT(&tcp_group.qpairs);
1120  	group = &tcp_group.group;
1121  	group->transport = &ttransport.transport;
1122  	tqpair.group = &tcp_group;
1123  
1124  	TAILQ_INIT(&tqpair.tcp_req_free_queue);
1125  	TAILQ_INIT(&tqpair.tcp_req_working_queue);
1126  
1127  	tqpair.qpair.transport = &ttransport.transport;
1128  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1129  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
1130  	tqpair.qpair.state = SPDK_NVMF_QPAIR_ENABLED;
1131  
1132  	/* init tcp_req */
1133  	tcp_req.req.qpair = &tqpair.qpair;
1134  	tcp_req.pdu = &rsp_pdu;
1135  	tcp_req.pdu->qpair = &tqpair;
1136  	tqpair.mgmt_pdu = &mgmt_pdu;
1137  	tqpair.mgmt_pdu->qpair = &tqpair;
1138  	tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
1139  	tcp_req.req.rsp = &rsp0;
1140  	tcp_req.state = TCP_REQUEST_STATE_NEW;
1141  
1142  	TAILQ_INSERT_TAIL(&tqpair.tcp_req_working_queue, &tcp_req, state_link);
1143  	tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
1144  
1145  	/* Initialize the PDU so that it requires an SGL buffer */
1146  	capsule_data = &tqpair.pdu_in_progress->hdr.capsule_cmd;
1147  	sgl = &capsule_data->ccsqe.dptr.sgl1;
1148  
1149  	capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
1150  	capsule_data->common.hlen = sizeof(*capsule_data);
1151  	capsule_data->common.plen = 1096;
1152  	capsule_data->ccsqe.opc = SPDK_NVME_OPC_WRITE;
1153  	/* Set a non-zero value to check that it gets copied to the response */
1154  	capsule_data->ccsqe.cid = cid;
1155  
1156  	/* Set up SGL to ensure nvmf_tcp_req_parse_sgl returns an error */
1157  	sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
1158  	sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
1159  	sgl->unkeyed.length = UT_MAX_IO_SIZE + 1;
1160  
1161  	/* Process a command and ensure that it fails and the request is set up to return an error */
1162  	nvmf_tcp_req_process(&ttransport, &tcp_req);
1163  	CU_ASSERT(tcp_req.state == TCP_REQUEST_STATE_NEED_BUFFER);
1164  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1165  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1166  }
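/*
 * Unlike the invalid-opcode case above, an in-capsule SGL that is larger
 * than max_io_size is treated as a fatal transport error: the qpair is
 * quiesced and a C2H termination request is queued instead of a normal
 * completion.
 */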
1167  
1168  static void
1169  test_nvmf_tcp_pdu_ch_handle(void)
1170  {
1171  	struct spdk_nvmf_tcp_qpair tqpair = {};
1172  	struct nvme_tcp_pdu mgmt_pdu = {}, pdu_in_progress = {};
1173  
1174  	mgmt_pdu.qpair = &tqpair;
1175  	tqpair.mgmt_pdu = &mgmt_pdu;
1176  	tqpair.pdu_in_progress = &pdu_in_progress;
1177  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1178  	tqpair.cpda = 0;
1179  
1180  	/* Test case: An ICReq PDU has already been received. Expect: fail */
1181  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
1182  	tqpair.state = NVMF_TCP_QPAIR_STATE_INITIALIZING;
1183  	nvmf_tcp_pdu_ch_handle(&tqpair);
1184  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1185  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1186  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1187  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
1188  
1189  	/* Test case: The received PDU header length differs from the expected length. Expect: fail */
1190  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1191  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
1192  	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
1193  	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
1194  	tqpair.pdu_in_progress->hdr.common.hlen = 0;
1195  	nvmf_tcp_pdu_ch_handle(&tqpair);
1196  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1197  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1198  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1199  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
1200  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 2);
1201  
1202  	/* Test case: The TCP/IP tqpair connection has not been negotiated. Expect: fail */
1203  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1204  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_RESP;
1205  	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
1206  	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
1207  	tqpair.pdu_in_progress->hdr.common.hlen = 0;
1208  	nvmf_tcp_pdu_ch_handle(&tqpair);
1209  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1210  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1211  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1212  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen);
1213  
1214  	/* Test case: Unexpected PDU type. Expect: fail */
1215  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1216  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_RESP;
1217  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1218  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1219  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
1220  	nvmf_tcp_pdu_ch_handle(&tqpair);
1221  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1222  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1223  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1224  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
1225  		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
1226  
1227  	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_IC_REQ with an invalid plen. Expect: fail */
1228  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1229  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
1230  	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
1231  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1232  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
1233  	nvmf_tcp_pdu_ch_handle(&tqpair);
1234  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1235  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1236  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1237  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
1238  		  (unsigned)SPDK_NVME_TCP_TERM_REQ_ERROR_DATA_MAX_SIZE);
1239  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);
1240  
1241  	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid plen. Expect: fail */
1242  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1243  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
1244  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1245  	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1246  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1247  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
1248  	nvmf_tcp_pdu_ch_handle(&tqpair);
1249  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1250  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1251  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1252  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
1253  			  struct spdk_nvme_tcp_term_req_hdr));
1254  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);
1255  
1256  	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid plen. Expect: fail */
1257  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1258  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
1259  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1260  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1261  	tqpair.pdu_in_progress->hdr.common.pdo = 64;
1262  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
1263  	nvmf_tcp_pdu_ch_handle(&tqpair);
1264  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1265  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1266  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1267  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
1268  		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1269  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);
1270  
1271  	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ with an invalid plen. Expect: fail */
1272  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1273  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_TERM_REQ;
1274  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1275  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1276  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_term_req_hdr);
1277  	nvmf_tcp_pdu_ch_handle(&tqpair);
1278  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1279  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1280  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1281  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
1282  		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1283  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 4);
1284  
1285  	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD with an invalid pdo. Expect: fail */
1286  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1287  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
1288  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1289  	tqpair.cpda = 1;
1290  	tqpair.pdu_in_progress->hdr.common.flags = SPDK_NVME_TCP_CH_FLAGS_HDGSTF;
1291  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1292  	tqpair.pdu_in_progress->hdr.common.pdo = 63;
1293  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
1294  	nvmf_tcp_pdu_ch_handle(&tqpair);
1295  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1296  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1297  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1298  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == sizeof(struct spdk_nvme_tcp_cmd) + sizeof(
1299  			  struct spdk_nvme_tcp_term_req_hdr));
1300  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);
1301  
1302  	/* Test case: PDU type is SPDK_NVME_TCP_PDU_TYPE_H2C_DATA with an invalid pdo. Expect: fail */
1303  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1304  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_H2C_DATA;
1305  	tqpair.state = NVMF_TCP_QPAIR_STATE_RUNNING;
1306  	tqpair.cpda = 1;
1307  	tqpair.pdu_in_progress->hdr.common.plen = 0;
1308  	tqpair.pdu_in_progress->hdr.common.pdo = 63;
1309  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_h2c_data_hdr);
1310  	nvmf_tcp_pdu_ch_handle(&tqpair);
1311  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_QUIESCING);
1312  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.pdu_type == SPDK_NVME_TCP_PDU_TYPE_C2H_TERM_REQ);
1313  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.hlen == sizeof(struct spdk_nvme_tcp_term_req_hdr));
1314  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.common.plen == tqpair.mgmt_pdu->hdr.term_req.common.hlen +
1315  		  (unsigned)sizeof(struct spdk_nvme_tcp_term_req_hdr));
1316  	CU_ASSERT(tqpair.mgmt_pdu->hdr.term_req.fei[0] == 3);
1317  
1318  	/* Test case: All parameters conform to what the function expects. Expect: PASS */
1319  	tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_CH;
1320  	tqpair.pdu_in_progress->hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_IC_REQ;
1321  	tqpair.state = NVMF_TCP_QPAIR_STATE_INVALID;
1322  	tqpair.pdu_in_progress->hdr.common.plen = sizeof(struct spdk_nvme_tcp_ic_req);
1323  	tqpair.pdu_in_progress->hdr.common.hlen = sizeof(struct spdk_nvme_tcp_ic_req);
1324  	nvmf_tcp_pdu_ch_handle(&tqpair);
1325  	CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH);
1326  	CU_ASSERT(tqpair.pdu_in_progress->psh_len == tqpair.pdu_in_progress->hdr.common.hlen - sizeof(
1327  			  struct spdk_nvme_tcp_common_pdu_hdr));
1328  }
1329  
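/*
 * Verify that nvmf_tcp_subsystem_add_host() adds a tcp_psk_entry for the
 * (subsystem NQN, host NQN) pair to the transport's PSK list and that
 * nvmf_tcp_subsystem_remove_host() removes it again.
 */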
1330  static void
1331  test_nvmf_tcp_tls_add_remove_credentials(void)
1332  {
1333  	struct spdk_thread *thread;
1334  	struct spdk_nvmf_transport *transport;
1335  	struct spdk_nvmf_tcp_transport *ttransport;
1336  	struct spdk_nvmf_transport_opts opts;
1337  	struct spdk_nvmf_subsystem subsystem;
1338  	struct tcp_psk_entry *entry;
1339  	struct spdk_sock_group grp = {};
1340  	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
1341  	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
1342  	const char *psk = "NVMeTLSkey-1:01:VRLbtnN9AQb2WXW3c9+wEf/DRLz0QuLdbYvEhwtdWwNf9LrZ:";
1343  	bool found = false;
1344  
1345  	thread = spdk_thread_create(NULL, NULL);
1346  	SPDK_CU_ASSERT_FATAL(thread != NULL);
1347  	spdk_set_thread(thread);
1348  
1349  	memset(&opts, 0, sizeof(opts));
1350  	opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
1351  	opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
1352  	opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
1353  	opts.max_io_size = UT_MAX_IO_SIZE;
1354  	opts.io_unit_size = UT_IO_UNIT_SIZE;
1355  	opts.max_aq_depth = UT_MAX_AQ_DEPTH;
1356  	opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
1357  	MOCK_SET(spdk_sock_group_create, &grp);
1358  	transport = nvmf_tcp_create(&opts);
1359  	MOCK_CLEAR_P(spdk_sock_group_create);
1360  
1361  	memset(&subsystem, 0, sizeof(subsystem));
1362  	snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
1363  	snprintf(g_ut_psk.data, sizeof(g_ut_psk.data), "%s", psk);
1364  	g_ut_psk.len = strlen(psk) + 1;
1365  
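	/* Minimal JSON token stream for the transport-specific host options:
	 * a single "psk" member that names the registered key (g_ut_psk).
	 */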
1366  	struct spdk_json_val psk_json[] = {
1367  		{"", 2, SPDK_JSON_VAL_OBJECT_BEGIN},
1368  		{"psk", 3, SPDK_JSON_VAL_NAME},
1369  		{(void *)g_ut_psk.name, strlen(g_ut_psk.name), SPDK_JSON_VAL_STRING},
1370  		{"", 0, SPDK_JSON_VAL_OBJECT_END},
1371  	};
1372  
1373  	nvmf_tcp_subsystem_add_host(transport, &subsystem, hostnqn, psk_json);
1374  
1375  	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1376  	TAILQ_FOREACH(entry, &ttransport->psks, link) {
1377  		if ((strcmp(subnqn, entry->subnqn) == 0) &&
1378  		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
1379  			found = true;
1380  		}
1381  	}
1382  
1383  	CU_ASSERT(found == true);
1384  	found = false;
1385  
1386  	nvmf_tcp_subsystem_remove_host(transport, &subsystem, hostnqn);
1387  
1388  	ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
1389  	TAILQ_FOREACH(entry, &ttransport->psks, link) {
1390  		if ((strcmp(subnqn, entry->subnqn) == 0) &&
1391  		    (strcmp(hostnqn, entry->hostnqn) == 0)) {
1392  			found = true;
1393  		}
1394  	}
1395  
1396  	CU_ASSERT(found == false);
1397  
1398  	CU_ASSERT(nvmf_tcp_destroy(transport, NULL, NULL) == 0);
1399  
1400  	spdk_thread_exit(thread);
1401  	while (!spdk_thread_is_exited(thread)) {
1402  		spdk_thread_poll(thread, 0, 0);
1403  	}
1404  	spdk_thread_destroy(thread);
1405  }
1406  
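/*
 * nvme_tcp_generate_psk_identity() should produce the TLS PSK identity string
 * "NVMe0R01 <hostnqn> <subnqn>" and fail for undersized buffers or unknown
 * cipher suites.
 */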
1407  static void
1408  test_nvmf_tcp_tls_generate_psk_id(void)
1409  {
1410  	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
1411  	const char subnqn[] = {"nqn.2016-06.io.spdk:cnode1"};
1412  	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
1413  	char psk_id[NVMF_PSK_IDENTITY_LEN] = {};
1414  	char too_small_psk_id[5] = {};
1415  
1416  	/* Check that we can generate the expected PSK identity. */
1417  	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
1418  			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) == 0);
1419  	CU_ASSERT(strcmp(psk_id, psk_id_reference) == 0);
1420  
1421  	/* Test with a buffer that is too small to fit the PSK identity. */
1422  	CU_ASSERT(nvme_tcp_generate_psk_identity(too_small_psk_id, sizeof(too_small_psk_id), hostnqn,
1423  			subnqn, NVME_TCP_CIPHER_AES_128_GCM_SHA256) != 0);
1424  
1425  	/* Test with unknown cipher suite. */
1426  	CU_ASSERT(nvme_tcp_generate_psk_identity(psk_id, NVMF_PSK_IDENTITY_LEN, hostnqn,
1427  			subnqn, UINT8_MAX) != 0);
1428  }
1429  
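/*
 * nvme_tcp_derive_retained_psk() derives the retained PSK from the configured
 * (unhexlified) PSK and the host NQN. Different input PSKs or different hash
 * algorithms must yield different retained PSKs (SHA-384 output is longer than
 * SHA-256), and unknown hash values or undersized buffers must fail.
 */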
1430  static void
1431  test_nvmf_tcp_tls_generate_retained_psk(void)
1432  {
1433  	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
1434  	const char psk_reference1[] = {"1234567890ABCDEF"};
1435  	const char psk_reference2[] = {"FEDCBA0987654321"};
1436  	uint8_t unhexlified_str1[SPDK_TLS_PSK_MAX_LEN] = {};
1437  	uint8_t unhexlified_str2[SPDK_TLS_PSK_MAX_LEN] = {};
1438  	char *unhexlified1;
1439  	char *unhexlified2;
1440  	uint8_t psk_retained1[SPDK_TLS_PSK_MAX_LEN] = {};
1441  	uint8_t psk_retained2[SPDK_TLS_PSK_MAX_LEN] = {};
1442  	uint8_t too_small_psk_retained[5] = {};
1443  	int psk_retained_len1, psk_retained_len2;
1444  	int retained_size;
1445  
1446  	unhexlified1 = spdk_unhexlify(psk_reference1);
1447  	SPDK_CU_ASSERT_FATAL(unhexlified1 != NULL);
1448  	unhexlified2 = spdk_unhexlify(psk_reference2);
1449  	SPDK_CU_ASSERT_FATAL(unhexlified2 != NULL);
1450  
1451  	memcpy(unhexlified_str1, unhexlified1, strlen(psk_reference1) / 2);
1452  	memcpy(unhexlified_str2, unhexlified2, strlen(psk_reference2) / 2);
1453  	free(unhexlified1);
1454  	free(unhexlified2);
1455  
1456  	/* Make sure that retained PSKs differ when the input PSKs differ but the hash is the same. */
1457  	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
1458  			psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
1459  	CU_ASSERT(retained_size > 0);
1460  
1461  	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str2, strlen(psk_reference2) / 2, hostnqn,
1462  					       psk_retained2,
1463  					       SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256) > 0);
1464  	CU_ASSERT(memcmp(psk_retained1, psk_retained2, retained_size) != 0);
1465  
1466  	/* Make sure that retained PSKs differ when the hash differs but the input PSK is the same. */
1467  	psk_retained_len1 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
1468  			    hostnqn, psk_retained1, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
1469  	CU_ASSERT(psk_retained_len1 > 0);
1470  	psk_retained_len2 = nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2,
1471  			    hostnqn, psk_retained2, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA384);
1472  	CU_ASSERT(psk_retained_len2 > 0);
1473  	CU_ASSERT(psk_retained_len1 < psk_retained_len2);
1474  
1475  	/* Make sure that passing an unknown hash algorithm causes the function to fail. */
1476  	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
1477  					       psk_retained1, SPDK_TLS_PSK_MAX_LEN, -1) < 0);
1478  
1479  	/* Make sure that passing an output buffer that is too small causes the function to fail. */
1480  	CU_ASSERT(nvme_tcp_derive_retained_psk(unhexlified_str1, strlen(psk_reference1) / 2, hostnqn,
1481  					       too_small_psk_retained, sizeof(too_small_psk_retained), NVME_TCP_HASH_ALGORITHM_SHA256) < 0);
1482  }
1483  
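/*
 * nvme_tcp_derive_tls_psk() derives the final TLS PSK from the retained PSK
 * and the PSK identity. Different cipher suites must yield different keys,
 * while unknown cipher suites or undersized output buffers must fail.
 */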
1484  static void
1485  test_nvmf_tcp_tls_generate_tls_psk(void)
1486  {
1487  	const char psk_id_reference[] = {"NVMe0R01 nqn.2016-06.io.spdk:host1 nqn.2016-06.io.spdk:cnode1"};
1488  	const char hostnqn[] = {"nqn.2016-06.io.spdk:host1"};
1489  	const char psk_reference[] = {"1234567890ABCDEF"};
1490  	char *unhexlified;
1491  	uint8_t unhexlified_str[SPDK_TLS_PSK_MAX_LEN] = {};
1492  	uint8_t psk_retained[SPDK_TLS_PSK_MAX_LEN] = {};
1493  	uint8_t psk_key1[SPDK_TLS_PSK_MAX_LEN] = {}, psk_key2[SPDK_TLS_PSK_MAX_LEN] = {};
1494  	uint8_t too_small_psk_tls[5] = {};
1495  	int retained_size, tls_size;
1496  
1497  	unhexlified = spdk_unhexlify(psk_reference);
1498  	SPDK_CU_ASSERT_FATAL(unhexlified != NULL);
1499  
1500  	memcpy(unhexlified_str, unhexlified, strlen(psk_reference) / 2);
1501  	free(unhexlified);
1502  
1503  	retained_size = nvme_tcp_derive_retained_psk(unhexlified_str, strlen(psk_reference) / 2, hostnqn,
1504  			psk_retained, SPDK_TLS_PSK_MAX_LEN, NVME_TCP_HASH_ALGORITHM_SHA256);
1505  	CU_ASSERT(retained_size > 0);
1506  
1507  	/* Make sure that different cipher suites produce different TLS PSKs. */
1508  	tls_size = nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key1,
1509  					   SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_128_GCM_SHA256);
1510  	CU_ASSERT(tls_size > 0);
1511  	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference, psk_key2,
1512  					  SPDK_TLS_PSK_MAX_LEN, NVME_TCP_CIPHER_AES_256_GCM_SHA384) > 0);
1513  	CU_ASSERT(memcmp(psk_key1, psk_key2, tls_size) != 0);
1514  
1515  	/* Make sure that passing an unknown cipher suite causes the function to fail. */
1516  	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
1517  					  psk_key1, SPDK_TLS_PSK_MAX_LEN, UINT8_MAX) < 0);
1518  
1519  	/* Make sure that passing an output buffer that is too small causes the function to fail. */
1520  	CU_ASSERT(nvme_tcp_derive_tls_psk(psk_retained, retained_size, psk_id_reference,
1521  					  too_small_psk_tls, sizeof(too_small_psk_tls),
1522  					  NVME_TCP_CIPHER_AES_128_GCM_SHA256) < 0);
1523  }
1524  
1525  int
1526  main(int argc, char **argv)
1527  {
1528  	CU_pSuite	suite = NULL;
1529  	unsigned int	num_failures;
1530  
1531  	CU_initialize_registry();
1532  
1533  	suite = CU_add_suite("nvmf", NULL, NULL);
1534  
1535  	CU_ADD_TEST(suite, test_nvmf_tcp_create);
1536  	CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
1537  	CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
1538  	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
1539  	CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
1540  	CU_ADD_TEST(suite, test_nvmf_tcp_in_capsule_data_handle);
1541  	CU_ADD_TEST(suite, test_nvmf_tcp_qpair_init_mem_resource);
1542  	CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_term_req);
1543  	CU_ADD_TEST(suite, test_nvmf_tcp_send_capsule_resp_pdu);
1544  	CU_ADD_TEST(suite, test_nvmf_tcp_icreq_handle);
1545  	CU_ADD_TEST(suite, test_nvmf_tcp_check_xfer_type);
1546  	CU_ADD_TEST(suite, test_nvmf_tcp_invalid_sgl);
1547  	CU_ADD_TEST(suite, test_nvmf_tcp_pdu_ch_handle);
1548  	CU_ADD_TEST(suite, test_nvmf_tcp_tls_add_remove_credentials);
1549  	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_psk_id);
1550  	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_retained_psk);
1551  	CU_ADD_TEST(suite, test_nvmf_tcp_tls_generate_tls_psk);
1552  
1553  	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1554  	CU_cleanup_registry();
1555  	return num_failures;
1556  }
1557