xref: /spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c (revision 2c140f58ffe19fb26bb9d25f4df8ac7937a32557)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2021-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_internal/cunit.h"
8 #include "nvme/nvme_rdma.c"
9 #include "common/lib/nvme/common_stubs.h"
10 #include "common/lib/test_rdma.c"
11 
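/*
 * The DEFINE_STUB/DEFINE_STUB_V macros below replace the memory-map, memory-domain,
 * rdma_cm and ibverbs dependencies of nvme_rdma.c with no-op or fixed-return
 * implementations so the transport code included above can be exercised in isolation.
 * Functions a test needs to override at run time are declared with DEFINE_RETURN_MOCK
 * and driven through MOCK_SET()/MOCK_CLEAR().
 */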
12 SPDK_LOG_REGISTER_COMPONENT(nvme)
13 
14 DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
15 		uint64_t size, uint64_t translation), 0);
16 DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
17 		uint64_t size), 0);
18 
19 DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
20 		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
21 DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
22 
23 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
24 
25 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
26 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
27 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
28 
29 DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
30 DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
31 DEFINE_STUB(fcntl, int, (int fd, int cmd, ...), 0);
32 DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
33 
34 DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
35 DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
36 
37 DEFINE_STUB(spdk_rdma_provider_accel_sequence_supported, bool, (void), false);
38 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
39 	    (struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
40 DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
41 DEFINE_STUB(spdk_memory_domain_pull_data, int, (struct spdk_memory_domain *src_domain,
42 		void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
43 		uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
44 DEFINE_STUB(spdk_rdma_cm_id_get_numa_id, int32_t, (struct rdma_cm_id *cm_id), 0);
45 
46 DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
47 		struct spdk_nvme_cmd *cmd));
48 
49 DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
50 		struct spdk_nvme_cpl *cpl));
51 DEFINE_STUB(spdk_nvme_qpair_is_connected, bool, (struct spdk_nvme_qpair *qpair), true);
52 DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
53 int
54 spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
55 			  struct spdk_memory_domain_ctx *ctx, const char *id)
56 {
57 	static struct spdk_memory_domain *__dma_dev = (struct spdk_memory_domain *)0xdeaddead;
58 
59 	HANDLE_RETURN_MOCK(spdk_memory_domain_create);
60 
61 	*domain = __dma_dev;
62 
63 	return 0;
64 }
65 
66 static struct spdk_memory_domain_translation_result g_memory_translation_translation = {.size = sizeof(struct spdk_memory_domain_translation_result) };
67 
68 DEFINE_RETURN_MOCK(spdk_memory_domain_translate_data, int);
69 int
70 spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
71 				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
72 				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
73 {
74 
75 	HANDLE_RETURN_MOCK(spdk_memory_domain_translate_data);
76 
77 	memcpy(result, &g_memory_translation_translation, sizeof(g_memory_translation_translation));
78 
79 	return 0;
80 }
81 
82 /* ibv_reg_mr can be a macro, need to undefine it */
83 #ifdef ibv_reg_mr
84 #undef ibv_reg_mr
85 #endif
86 
87 DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
88 struct ibv_mr *
89 ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
90 {
91 	HANDLE_RETURN_MOCK(ibv_reg_mr);
92 	if (length > 0) {
93 		return &g_rdma_mr;
94 	} else {
95 		return NULL;
96 	}
97 }
98 
99 struct nvme_rdma_ut_bdev_io {
100 	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
101 	int iovpos;
102 	int iovcnt;
103 };
104 
105 DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
106 struct ibv_context **
107 rdma_get_devices(int *num_devices)
108 {
109 	static struct ibv_context *_contexts[] = {
110 		(struct ibv_context *)0xDEADBEEF,
111 		(struct ibv_context *)0xFEEDBEEF,
112 		NULL
113 	};
114 
115 	HANDLE_RETURN_MOCK(rdma_get_devices);
116 	return _contexts;
117 }
118 
119 DEFINE_RETURN_MOCK(rdma_create_event_channel, struct rdma_event_channel *);
120 struct rdma_event_channel *
121 rdma_create_event_channel(void)
122 {
123 	HANDLE_RETURN_MOCK(rdma_create_event_channel);
124 	return NULL;
125 }
126 
127 DEFINE_RETURN_MOCK(ibv_query_device, int);
128 int
129 ibv_query_device(struct ibv_context *context,
130 		 struct ibv_device_attr *device_attr)
131 {
132 	if (device_attr) {
133 		device_attr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
134 	}
135 	HANDLE_RETURN_MOCK(ibv_query_device);
136 
137 	return 0;
138 }
139 
140 /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
141 static void
142 nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
143 {
144 	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
145 	struct iovec *iov;
146 
147 	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
148 		iov = &bio->iovs[bio->iovpos];
149 		/* Only provide offsets at the beginning of an iov */
150 		if (offset == 0) {
151 			break;
152 		}
153 
154 		offset -= iov->iov_len;
155 	}
156 
157 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
158 }
159 
160 static int
161 nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
162 {
163 	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
164 	struct iovec *iov;
165 
166 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
167 
168 	if (bio->iovpos == bio->iovcnt) {
169 		return -1;
170 	}
171 
172 	iov = &bio->iovs[bio->iovpos];
173 
174 	*address = iov->iov_base;
175 	*length = iov->iov_len;
176 	bio->iovpos++;
177 
178 	return 0;
179 }
180 
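/*
 * test_nvme_rdma_build_sgl_request:
 * As the assertions below show, nvme_rdma_build_sgl_request() emits a single keyed
 * data-block descriptor in cmd.dptr.sgl1 when one iov suffices, and switches to a
 * last-segment descriptor with per-iov keyed descriptors appended after the command
 * (growing send_sgl[0]) when several iovs are needed. It must fail when an iov exceeds
 * the registered MR, when the iovs cannot cover the payload, when a length does not fit
 * the keyed descriptor, or when the descriptor list does not fit in ioccsz_bytes.
 */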
181 static void
182 test_nvme_rdma_build_sgl_request(void)
183 {
184 	struct nvme_rdma_qpair rqpair;
185 	struct spdk_nvme_ctrlr ctrlr = {0};
186 	struct spdk_nvmf_cmd cmd = {{0}};
187 	struct spdk_nvme_rdma_req rdma_req = {0};
188 	struct nvme_request req = {{0}};
189 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
190 	uint64_t i;
191 	int rc;
192 
193 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
194 	ctrlr.cdata.nvmf_specific.msdbd = 16;
195 	ctrlr.ioccsz_bytes = 4096;
196 
197 	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
198 	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
199 	rqpair.qpair.ctrlr = &ctrlr;
200 	rqpair.cmds = &cmd;
201 	cmd.sgl[0].address = 0x1111;
202 	rdma_req.id = 0;
203 	rdma_req.req = &req;
204 
205 	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
206 	req.qpair = &rqpair.qpair;
207 
208 	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
209 		bio.iovs[i].iov_base = (void *)0xF00000000 + i + 1;
210 		bio.iovs[i].iov_len = 0;
211 	}
212 
213 	/* Test case 1: single SGL. Expected: PASS */
214 	bio.iovpos = 0;
215 	req.payload_offset = 0;
216 	req.payload_size = 0x1000;
217 	bio.iovs[0].iov_len = 0x1000;
218 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
219 	SPDK_CU_ASSERT_FATAL(rc == 0);
220 	CU_ASSERT(bio.iovpos == 1);
221 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
222 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
223 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
224 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
225 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
226 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
227 
228 	/* Test case 2: multiple SGL. Expected: PASS */
229 	bio.iovpos = 0;
230 	req.payload_offset = 0;
231 	req.payload_size = 0x4000;
232 	for (i = 0; i < 4; i++) {
233 		bio.iovs[i].iov_len = 0x1000;
234 	}
235 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
236 	SPDK_CU_ASSERT_FATAL(rc == 0);
237 	CU_ASSERT(bio.iovpos == 4);
238 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
239 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
240 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
241 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
242 	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
243 		  struct spdk_nvme_cmd));
244 	for (i = 0; i < 4; i++) {
245 		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
246 		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
247 		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
248 		CU_ASSERT(cmd.sgl[i].keyed.key == RDMA_UT_RKEY);
249 		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
250 	}
251 
252 	/* Test case 3: Multiple SGL, iov length is 2X the registered MR size. Expected: FAIL */
253 	bio.iovpos = 0;
254 	req.payload_offset = 0;
255 	g_mr_size = 0x800;
256 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
257 	SPDK_CU_ASSERT_FATAL(rc != 0);
258 	CU_ASSERT(bio.iovpos == 1);
259 
260 	/* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
261 	bio.iovpos = 0;
262 	bio.iovcnt = 4;
263 	req.payload_offset = 0;
264 	req.payload_size = 0x6000;
265 	g_mr_size = 0x0;
266 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
267 	SPDK_CU_ASSERT_FATAL(rc != 0);
268 	CU_ASSERT(bio.iovpos == bio.iovcnt);
269 	bio.iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS;
270 
271 	/* Test case 5: an iov length exceeds the 24-bit (3-byte) keyed SGL length field. Expected: FAIL */
272 	req.payload_size = 0x1000 + (1 << 24);
273 	bio.iovs[0].iov_len = 0x1000;
274 	bio.iovs[1].iov_len = 1 << 24;
275 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
276 	SPDK_CU_ASSERT_FATAL(rc != 0);
277 
278 	/* Test case 6: 4 SGL descriptors, size of SGL descriptors exceeds ICD. Expected: FAIL */
279 	ctrlr.ioccsz_bytes = 60;
280 	bio.iovpos = 0;
281 	req.payload_offset = 0;
282 	req.payload_size = 0x4000;
283 	for (i = 0; i < 4; i++) {
284 		bio.iovs[i].iov_len = 0x1000;
285 	}
286 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
287 	SPDK_CU_ASSERT_FATAL(rc == -1);
288 }
289 
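/*
 * test_nvme_rdma_build_sgl_inline_request:
 * For in-capsule (inline) payloads the command carries an unkeyed offset data-block
 * descriptor and the data itself is attached as a second send_sgl[] element using the
 * local key. The unkeyed length field is wider than the 24-bit keyed one, so a payload
 * of 1 << 24 bytes still succeeds here.
 */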
290 static void
291 test_nvme_rdma_build_sgl_inline_request(void)
292 {
293 	struct nvme_rdma_qpair rqpair;
294 	struct spdk_nvme_ctrlr ctrlr = {0};
295 	struct spdk_nvmf_cmd cmd = {{0}};
296 	struct spdk_nvme_rdma_req rdma_req = {0};
297 	struct nvme_request req = {{0}};
298 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
299 	int rc;
300 
301 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
302 	ctrlr.cdata.nvmf_specific.msdbd = 16;
303 
304 	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
305 	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
306 	rqpair.qpair.ctrlr = &ctrlr;
307 	rqpair.cmds = &cmd;
308 	cmd.sgl[0].address = 0x1111;
309 	rdma_req.id = 0;
310 	rdma_req.req = &req;
311 
312 	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
313 	req.qpair = &rqpair.qpair;
314 
315 	/* Test case 1: single inline SGL. Expected: PASS */
316 	bio.iovpos = 0;
317 	req.payload_offset = 0;
318 	req.payload_size = 0x1000;
319 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
320 	bio.iovs[0].iov_len = 0x1000;
321 	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
322 	SPDK_CU_ASSERT_FATAL(rc == 0);
323 	CU_ASSERT(bio.iovpos == 1);
324 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
325 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
326 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
327 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
328 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
329 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
330 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
331 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
332 
333 	/* Test case 2: payload length exceeds 24 bits (3 bytes); the unkeyed inline SGL length field is wider. Expected: PASS */
334 	bio.iovpos = 0;
335 	req.payload_offset = 0;
336 	req.payload_size = 1 << 24;
337 	bio.iovs[0].iov_len = 1 << 24;
338 	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
339 	SPDK_CU_ASSERT_FATAL(rc == 0);
340 	CU_ASSERT(bio.iovpos == 1);
341 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
342 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
343 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
344 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
345 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
346 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
347 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
348 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
349 }
350 
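/*
 * test_nvme_rdma_build_contig_request:
 * A contiguous buffer maps to a single keyed data-block descriptor addressing the
 * payload directly; the build must fail once the payload no longer fits the 24-bit
 * keyed length field.
 */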
351 static void
352 test_nvme_rdma_build_contig_request(void)
353 {
354 	struct nvme_rdma_qpair rqpair;
355 	struct spdk_nvme_ctrlr ctrlr = {0};
356 	struct spdk_nvmf_cmd cmd = {{0}};
357 	struct spdk_nvme_rdma_req rdma_req = {0};
358 	struct nvme_request req = {{0}};
359 	int rc;
360 
361 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
362 	ctrlr.cdata.nvmf_specific.msdbd = 16;
363 
364 	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
365 	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
366 	rqpair.qpair.ctrlr = &ctrlr;
367 	rqpair.cmds = &cmd;
368 	cmd.sgl[0].address = 0x1111;
369 	rdma_req.id = 0;
370 	rdma_req.req = &req;
371 
372 	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
373 	req.qpair = &rqpair.qpair;
374 
375 	/* Test case 1: contig request. Expected: PASS */
376 	req.payload_offset = 0;
377 	req.payload_size = 0x1000;
378 	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
379 	SPDK_CU_ASSERT_FATAL(rc == 0);
380 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
381 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
382 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
383 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
384 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
385 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
386 
387 	/* Test case 2: payload length exceeds the 24-bit (3-byte) keyed SGL length field. Expected: FAIL */
388 	req.payload_offset = 0;
389 	req.payload_size = 1 << 24;
390 	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
391 	SPDK_CU_ASSERT_FATAL(rc != 0);
392 }
393 
394 static void
395 test_nvme_rdma_build_contig_inline_request(void)
396 {
397 	struct nvme_rdma_qpair rqpair;
398 	struct spdk_nvme_ctrlr ctrlr = {0};
399 	struct spdk_nvmf_cmd cmd = {{0}};
400 	struct spdk_nvme_rdma_req rdma_req = {0};
401 	struct nvme_request req = {{0}};
402 	int rc;
403 
404 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
405 	ctrlr.cdata.nvmf_specific.msdbd = 16;
406 
407 	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
408 	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
409 	rqpair.qpair.ctrlr = &ctrlr;
410 	rqpair.cmds = &cmd;
411 	cmd.sgl[0].address = 0x1111;
412 	rdma_req.id = 0;
413 	rdma_req.req = &req;
414 
415 	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
416 	req.qpair = &rqpair.qpair;
417 
418 	/* Test case 1: single inline SGL. Expected: PASS */
419 	req.payload_offset = 0;
420 	req.payload_size = 0x1000;
421 	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
422 	SPDK_CU_ASSERT_FATAL(rc == 0);
423 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
424 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
425 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
426 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
427 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
428 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
429 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
430 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
431 
432 	/* Test case 2: payload length exceeds 24 bits (3 bytes); the unkeyed inline SGL length field is wider. Expected: PASS */
433 	req.payload_offset = 0;
434 	req.payload_size = 1 << 24;
435 	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
436 	SPDK_CU_ASSERT_FATAL(rc == 0);
437 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
438 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
439 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
440 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
441 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
442 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
443 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
444 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
445 }
446 
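/*
 * test_nvme_rdma_create_reqs:
 * nvme_rdma_create_reqs() allocates rqpair->rdma_reqs and rqpair->cmds and
 * pre-initializes each send work request: sg_list points at the request's send_sgl,
 * send_sgl[0] covers the command capsule, wr_id points back at the embedded rdma_wr,
 * and the WR is a signaled IBV_WR_SEND. Zero entries must fail with -ENOMEM.
 */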
447 static void
448 test_nvme_rdma_create_reqs(void)
449 {
450 	struct nvme_rdma_qpair rqpair = {};
451 	int rc;
452 
453 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
454 
455 	/* Test case 1: zero entries. Expect: FAIL */
456 	rqpair.num_entries = 0;
457 
458 	rc = nvme_rdma_create_reqs(&rqpair);
459 	CU_ASSERT(rqpair.rdma_reqs == NULL);
460 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
461 
462 	/* Test case 2: single entry. Expect: PASS */
463 	memset(&rqpair, 0, sizeof(rqpair));
464 	rqpair.num_entries = 1;
465 
466 	rc = nvme_rdma_create_reqs(&rqpair);
467 	CU_ASSERT(rc == 0);
468 	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == g_rdma_mr.lkey);
469 	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr
470 		  == (uint64_t)&rqpair.cmds[0]);
471 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id
472 		  == (uint64_t)&rqpair.rdma_reqs[0].rdma_wr);
473 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.next == NULL);
474 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.opcode == IBV_WR_SEND);
475 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.send_flags == IBV_SEND_SIGNALED);
476 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.sg_list
477 		  == rqpair.rdma_reqs[0].send_sgl);
478 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.imm_data == 0);
479 	spdk_free(rqpair.rdma_reqs);
480 	spdk_free(rqpair.cmds);
481 
482 	/* Test case 3: multiple entries. Expect: PASS */
483 	memset(&rqpair, 0, sizeof(rqpair));
484 	rqpair.num_entries = 5;
485 
486 	rc = nvme_rdma_create_reqs(&rqpair);
487 	CU_ASSERT(rc == 0);
488 	for (int i = 0; i < 5; i++) {
489 		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].lkey == g_rdma_mr.lkey);
490 		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr
491 			  == (uint64_t)&rqpair.cmds[i]);
492 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id
493 			  == (uint64_t)&rqpair.rdma_reqs[i].rdma_wr);
494 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.next == NULL);
495 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.opcode == IBV_WR_SEND);
496 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.send_flags
497 			  == IBV_SEND_SIGNALED);
498 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.sg_list
499 			  == rqpair.rdma_reqs[i].send_sgl);
500 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.imm_data == 0);
501 	}
502 	spdk_free(rqpair.rdma_reqs);
503 	spdk_free(rqpair.cmds);
504 }
505 
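/*
 * test_nvme_rdma_create_rsps:
 * nvme_rdma_create_rsps() allocates the response array together with matching recv SGLs
 * and recv work requests; each SGL addresses its response and each WR's wr_id points at
 * the response's rdma_wr. num_entries == 0 must fail.
 */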
506 static void
507 test_nvme_rdma_create_rsps(void)
508 {
509 	struct nvme_rdma_rsp_opts opts = {};
510 	struct nvme_rdma_rsps *rsps;
511 	struct spdk_rdma_provider_qp *rdma_qp = (struct spdk_rdma_provider_qp *)0xfeedf00d;
512 	struct nvme_rdma_qpair rqpair = { .rdma_qp = rdma_qp, };
513 
514 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
515 
516 	opts.rqpair = &rqpair;
517 
518 	/* Test case 1: num_entries == 0, allocation fails. Expect: FAIL */
519 	opts.num_entries = 0;
520 	rsps = nvme_rdma_create_rsps(&opts);
521 	SPDK_CU_ASSERT_FATAL(rsps == NULL);
522 
523 	/* Test case 2: single entry, allocation succeeds. Expect: PASS */
524 	opts.num_entries = 1;
525 
526 	rsps = nvme_rdma_create_rsps(&opts);
527 	SPDK_CU_ASSERT_FATAL(rsps != NULL);
528 	CU_ASSERT(rsps->rsp_sgls != NULL);
529 	CU_ASSERT(rsps->rsp_recv_wrs != NULL);
530 	CU_ASSERT(rsps->rsps != NULL);
531 	CU_ASSERT(rsps->rsp_sgls[0].lkey == g_rdma_mr.lkey);
532 	CU_ASSERT(rsps->rsp_sgls[0].addr == (uint64_t)&rsps->rsps[0]);
533 	CU_ASSERT(rsps->rsp_recv_wrs[0].wr_id == (uint64_t)&rsps->rsps[0].rdma_wr);
534 
535 	nvme_rdma_free_rsps(rsps);
536 }
537 
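/*
 * test_nvme_rdma_ctrlr_create_qpair:
 * The RDMA qpair holds one entry fewer than the requested queue size
 * (num_entries == qsize - 1), so queue sizes below 2 must be rejected.
 */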
538 static void
539 test_nvme_rdma_ctrlr_create_qpair(void)
540 {
541 	struct spdk_nvme_ctrlr ctrlr = {};
542 	uint16_t qid, qsize;
543 	struct spdk_nvme_qpair *qpair;
544 	struct nvme_rdma_qpair *rqpair;
545 
546 	/* Test case 1: max qsize. Expect: PASS */
547 	qsize = 0xffff;
548 	qid = 1;
549 
550 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
551 					     SPDK_NVME_QPRIO_URGENT, 1,
552 					     false, false);
553 	CU_ASSERT(qpair != NULL);
554 	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
555 	CU_ASSERT(qpair == &rqpair->qpair);
556 	CU_ASSERT(rqpair->num_entries == qsize - 1);
557 	CU_ASSERT(rqpair->delay_cmd_submit == false);
558 
559 	spdk_free(rqpair);
560 	rqpair = NULL;
561 
562 	/* Test case 2: queue size 2. Expect: PASS */
563 	qsize = 2;
564 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
565 					     SPDK_NVME_QPRIO_URGENT, 1,
566 					     false, false);
567 	CU_ASSERT(qpair != NULL);
568 	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
569 	CU_ASSERT(rqpair->num_entries == qsize - 1);
570 
571 	spdk_free(rqpair);
572 	rqpair = NULL;
573 
574 	/* Test case 3: queue size zero. Expect: FAIL */
575 	qsize = 0;
576 
577 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
578 					     SPDK_NVME_QPRIO_URGENT, 1,
579 					     false, false);
580 	SPDK_CU_ASSERT_FATAL(qpair == NULL);
581 
582 	/* Test case 4: queue size 1. Expect: FAIL */
583 	qsize = 1;
584 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
585 					     SPDK_NVME_QPRIO_URGENT, 1,
586 					     false, false);
587 	SPDK_CU_ASSERT_FATAL(qpair == NULL);
588 }
589 
590 DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe, void *cq_context,
591 		struct ibv_comp_channel *channel, int comp_vector), (struct ibv_cq *)0xFEEDBEEF);
592 DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
593 
594 static void
595 test_nvme_rdma_poller_create(void)
596 {
597 	struct nvme_rdma_poll_group	group = {};
598 	struct ibv_context context = {
599 		.device = (struct ibv_device *)0xDEADBEEF
600 	};
601 	struct ibv_context context_2 = {
602 		.device = (struct ibv_device *)0xBAADBEEF
603 	};
604 	struct nvme_rdma_poller *poller_1, *poller_2, *poller_3;
605 
606 	/* Happy path: no allocation or ibverbs failures are injected in this test */
607 	STAILQ_INIT(&group.pollers);
608 
609 	poller_1 = nvme_rdma_poll_group_get_poller(&group, &context);
610 	SPDK_CU_ASSERT_FATAL(poller_1 != NULL);
611 	CU_ASSERT(group.num_pollers == 1);
612 	CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_1);
613 	CU_ASSERT(poller_1->refcnt == 1);
614 	CU_ASSERT(poller_1->device == &context);
615 	CU_ASSERT(poller_1->cq == (struct ibv_cq *)0xFEEDBEEF);
616 	CU_ASSERT(poller_1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
617 	CU_ASSERT(poller_1->required_num_wc == 0);
618 
619 	poller_2 = nvme_rdma_poll_group_get_poller(&group, &context_2);
620 	SPDK_CU_ASSERT_FATAL(poller_2 != NULL);
621 	CU_ASSERT(group.num_pollers == 2);
622 	CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_2);
623 	CU_ASSERT(poller_2->refcnt == 1);
624 	CU_ASSERT(poller_2->device == &context_2);
625 
626 	poller_3 = nvme_rdma_poll_group_get_poller(&group, &context);
627 	SPDK_CU_ASSERT_FATAL(poller_3 != NULL);
628 	CU_ASSERT(poller_3 == poller_1);
629 	CU_ASSERT(group.num_pollers == 2);
630 	CU_ASSERT(poller_3->refcnt == 2);
631 
632 	nvme_rdma_poll_group_put_poller(&group, poller_2);
633 	CU_ASSERT(group.num_pollers == 1);
634 
635 	nvme_rdma_poll_group_put_poller(&group, poller_1);
636 	CU_ASSERT(group.num_pollers == 1);
637 	CU_ASSERT(poller_3->refcnt == 1);
638 
639 	nvme_rdma_poll_group_put_poller(&group, poller_3);
640 	CU_ASSERT(STAILQ_EMPTY(&group.pollers));
641 	CU_ASSERT(group.num_pollers == 0);
642 
643 	nvme_rdma_poll_group_free_pollers(&group);
644 }
645 
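/*
 * test_nvme_rdma_qpair_process_cm_event:
 * Walks nvme_rdma_qpair_process_cm_event() through every rdma_cm event type it handles.
 * Most events are simply consumed; CONNECT_RESPONSE requires private data, DISCONNECTED
 * marks a remote failure, and DEVICE_REMOVAL / ADDR_CHANGE mark a local failure on the
 * qpair.
 */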
646 static void
647 test_nvme_rdma_qpair_process_cm_event(void)
648 {
649 	struct nvme_rdma_qpair rqpair = {};
650 	struct rdma_cm_event	 event = {};
651 	struct spdk_nvmf_rdma_accept_private_data	accept_data = {};
652 	int rc = 0;
653 
654 	/* case1: event == RDMA_CM_EVENT_ADDR_RESOLVED */
655 	rqpair.evt = &event;
656 	event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
657 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
658 	CU_ASSERT(rc == 0);
659 
660 	/* case2: event == RDMA_CM_EVENT_CONNECT_REQUEST */
661 	rqpair.evt = &event;
662 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
663 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
664 	CU_ASSERT(rc == 0);
665 
666 	/* case3: event == RDMA_CM_EVENT_CONNECT_ERROR */
667 	rqpair.evt = &event;
668 	event.event = RDMA_CM_EVENT_CONNECT_ERROR;
669 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
670 	CU_ASSERT(rc == 0);
671 
672 	/* case4: event == RDMA_CM_EVENT_UNREACHABLE */
673 	rqpair.evt = &event;
674 	event.event = RDMA_CM_EVENT_UNREACHABLE;
675 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
676 	CU_ASSERT(rc == 0);
677 
678 	/* case5: event == RDMA_CM_EVENT_CONNECT_RESPONSE */
679 	rqpair.evt = &event;
680 	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
681 	event.param.conn.private_data = NULL;
682 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
683 	CU_ASSERT(rc == -1);
684 
685 	rqpair.evt = &event;
686 	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
687 	event.param.conn.private_data = &accept_data;
688 	accept_data.crqsize = 512;
689 	rqpair.num_entries = 1024;
690 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
691 	CU_ASSERT(rc == 0);
692 	CU_ASSERT(rqpair.num_entries == 1024);
693 
694 	/* case6: event == RDMA_CM_EVENT_DISCONNECTED */
695 	rqpair.evt = &event;
696 	event.event = RDMA_CM_EVENT_DISCONNECTED;
697 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
698 	CU_ASSERT(rc == 0);
699 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_REMOTE);
700 
701 	/* case7: event == RDMA_CM_EVENT_DEVICE_REMOVAL */
702 	rqpair.evt = &event;
703 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
704 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
705 	CU_ASSERT(rc == 0);
706 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
707 
708 	/* case8: event == RDMA_CM_EVENT_MULTICAST_JOIN */
709 	rqpair.evt = &event;
710 	event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
711 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
712 	CU_ASSERT(rc == 0);
713 
714 	/* case9: event == RDMA_CM_EVENT_ADDR_CHANGE */
715 	rqpair.evt = &event;
716 	event.event = RDMA_CM_EVENT_ADDR_CHANGE;
717 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
718 	CU_ASSERT(rc == 0);
719 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
720 
721 	/* case10: event == RDMA_CM_EVENT_TIMEWAIT_EXIT */
722 	rqpair.evt = &event;
723 	event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
724 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
725 	CU_ASSERT(rc == 0);
726 
727 	/* case11: default event == 0xFF */
728 	rqpair.evt = &event;
729 	event.event = 0xFF;
730 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
731 	CU_ASSERT(rc == 0);
732 }
733 
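/*
 * test_nvme_rdma_ctrlr_construct:
 * Verifies that out-of-range transport_retry_count and transport_ack_timeout values are
 * clamped to the NVME_RDMA_CTRLR_MAX_* limits, that the trid and cm_channel are recorded,
 * and that the admin qpair is created with admin_queue_size - 1 entries.
 */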
734 static void
735 test_nvme_rdma_ctrlr_construct(void)
736 {
737 	struct spdk_nvme_ctrlr *ctrlr;
738 	struct spdk_nvme_transport_id trid = {};
739 	struct spdk_nvme_ctrlr_opts opts = {};
740 	struct nvme_rdma_qpair *rqpair = NULL;
741 	struct nvme_rdma_ctrlr *rctrlr = NULL;
742 	struct rdma_event_channel cm_channel = {};
743 	void *devhandle = NULL;
744 	int rc;
745 
746 	opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT + 1;
747 	opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
748 	opts.admin_queue_size = 0xFFFF;
749 	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
750 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
751 	MOCK_SET(rdma_create_event_channel, &cm_channel);
752 
753 	ctrlr = nvme_rdma_ctrlr_construct(&trid, &opts, devhandle);
754 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
755 	CU_ASSERT(ctrlr->opts.transport_retry_count ==
756 		  NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
757 	CU_ASSERT(ctrlr->opts.transport_ack_timeout ==
758 		  NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
759 	CU_ASSERT(ctrlr->opts.admin_queue_size == opts.admin_queue_size);
760 	rctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
761 	CU_ASSERT(rctrlr->max_sge == NVME_RDMA_MAX_SGL_DESCRIPTORS);
762 	CU_ASSERT(rctrlr->cm_channel == &cm_channel);
763 	CU_ASSERT(!strncmp((char *)&rctrlr->ctrlr.trid,
764 			   (char *)&trid, sizeof(trid)));
765 
766 	SPDK_CU_ASSERT_FATAL(ctrlr->adminq != NULL);
767 	rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
768 	CU_ASSERT(rqpair->num_entries == opts.admin_queue_size - 1);
769 	CU_ASSERT(rqpair->delay_cmd_submit == false);
770 	MOCK_CLEAR(rdma_create_event_channel);
771 
772 	/* Hardcode the trtype, because nvme_qpair_init() is a stub function. */
773 	rqpair->qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
774 	rc = nvme_rdma_ctrlr_destruct(ctrlr);
775 	CU_ASSERT(rc == 0);
776 }
777 
778 static void
779 test_nvme_rdma_req_put_and_get(void)
780 {
781 	struct nvme_rdma_qpair rqpair = {};
782 	struct spdk_nvme_rdma_req rdma_req = {};
783 	struct spdk_nvme_rdma_req *rdma_req_get;
784 
785 	/* case 1: nvme_rdma_req_put */
786 	TAILQ_INIT(&rqpair.free_reqs);
787 	rdma_req.completion_flags = 1;
788 	rdma_req.req = (struct nvme_request *)0xDEADBEFF;
789 	rdma_req.id = 10086;
790 	nvme_rdma_req_put(&rqpair, &rdma_req);
791 
792 	CU_ASSERT(rqpair.free_reqs.tqh_first == &rdma_req);
793 	CU_ASSERT(rqpair.free_reqs.tqh_first->completion_flags == 0);
794 	CU_ASSERT(rqpair.free_reqs.tqh_first->req == NULL);
795 	CU_ASSERT(rqpair.free_reqs.tqh_first->id == 10086);
796 	CU_ASSERT(rdma_req.completion_flags == 0);
797 	CU_ASSERT(rdma_req.req == NULL);
798 
799 	/* case 2: nvme_rdma_req_get */
800 	rdma_req_get = nvme_rdma_req_get(&rqpair);
801 	CU_ASSERT(rdma_req_get == &rdma_req);
802 	CU_ASSERT(rdma_req_get->id == 10086);
803 	CU_ASSERT(rqpair.free_reqs.tqh_first == NULL);
804 }
805 
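/*
 * test_nvme_rdma_req_init:
 * nvme_rdma_req_init() picks the SGL layout from the payload type and from whether
 * in-capsule data is usable (here: a host-to-controller transfer with icdoff == 0 and a
 * payload that fits ioccsz_bytes). With in-capsule data the payload goes inline as an
 * unkeyed descriptor plus a second send_sgl element; otherwise a keyed descriptor
 * referencing the registered buffer is built instead.
 */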
806 static void
807 test_nvme_rdma_req_init(void)
808 {
809 	struct nvme_rdma_qpair rqpair = {};
810 	struct spdk_nvme_ctrlr ctrlr = {};
811 	struct spdk_nvmf_cmd cmd = {};
812 	struct spdk_nvme_rdma_req rdma_req = {};
813 	struct nvme_request req = {};
814 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
815 	int rc = 1;
816 
817 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
818 	ctrlr.cdata.nvmf_specific.msdbd = 16;
819 
820 	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
821 	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
822 	rqpair.qpair.ctrlr = &ctrlr;
823 	rqpair.cmds = &cmd;
824 	cmd.sgl[0].address = 0x1111;
825 	rdma_req.id = 0;
826 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
827 
828 	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
829 	/* case 1: req->payload_size == 0, expect: pass. */
830 	req.payload_size = 0;
831 	rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
832 	rqpair.qpair.ctrlr->icdoff = 0;
833 	rdma_req.req = &req;
834 	req.cmd.cid = rdma_req.id;
835 	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
836 	CU_ASSERT(rc == 0);
837 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
838 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
839 	CU_ASSERT(rdma_req.send_wr.num_sge == 1);
840 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
841 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
842 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == 0);
843 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == 0);
844 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
845 
846 	/* case 2: payload_type == NVME_PAYLOAD_TYPE_CONTIG, expect: pass. */
847 	/* icd_supported is true */
848 	rqpair.qpair.ctrlr->icdoff = 0;
849 	req.payload_offset = 0;
850 	req.payload_size = 1024;
851 	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
852 	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
853 	CU_ASSERT(rc == 0);
854 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
855 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
856 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
857 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
858 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
859 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
860 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
861 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
862 
863 	/* icd_supported is false */
864 	rqpair.qpair.ctrlr->icdoff = 1;
865 	req.payload_offset = 0;
866 	req.payload_size = 1024;
867 	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
868 	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
869 	CU_ASSERT(rc == 0);
870 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
871 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
872 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
873 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
874 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
875 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
876 
877 	/* case 3: payload_type == NVME_PAYLOAD_TYPE_SGL, expect: pass. */
878 	/* icd_supported is true */
879 	rqpair.qpair.ctrlr->icdoff = 0;
880 	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
881 	req.qpair = &rqpair.qpair;
882 	bio.iovpos = 0;
883 	req.payload_offset = 0;
884 	req.payload_size = 1024;
885 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
886 	bio.iovs[0].iov_len = 1024;
887 	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
888 	CU_ASSERT(rc == 0);
889 	CU_ASSERT(bio.iovpos == 1);
890 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
891 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
892 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
893 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
894 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
895 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
896 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
897 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
898 
899 	/* icd_supported is false */
900 	rqpair.qpair.ctrlr->icdoff = 1;
901 	req.payload = NVME_PAYLOAD_SGL(nvme_rdma_ut_reset_sgl, nvme_rdma_ut_next_sge, &bio, NULL);
902 	req.qpair = &rqpair.qpair;
903 	bio.iovpos = 0;
904 	req.payload_offset = 0;
905 	req.payload_size = 1024;
906 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
907 	bio.iovs[0].iov_len = 1024;
908 	rc = nvme_rdma_req_init(&rqpair, &rdma_req);
909 	CU_ASSERT(rc == 0);
910 	CU_ASSERT(bio.iovpos == 1);
911 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
912 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
913 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
914 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
915 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
916 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
917 }
918 
919 static void
920 test_nvme_rdma_validate_cm_event(void)
921 {
922 	enum rdma_cm_event_type expected_evt_type;
923 	struct rdma_cm_event reaped_evt = {};
924 	int rc;
925 
926 	/* case 1: expected_evt_type == reaped_evt->event, expect: pass */
927 	expected_evt_type = RDMA_CM_EVENT_ADDR_RESOLVED;
928 	reaped_evt.event = RDMA_CM_EVENT_ADDR_RESOLVED;
929 
930 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
931 	CU_ASSERT(rc == 0);
932 
933 	/* case 2: expected_evt_type != RDMA_CM_EVENT_ESTABLISHED and differs from reaped_evt->event, expect: fail */
934 	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
935 
936 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
937 	CU_ASSERT(rc == -EBADMSG);
938 
939 	/* case 3: expected_evt_type == RDMA_CM_EVENT_ESTABLISHED */
940 	expected_evt_type = RDMA_CM_EVENT_ESTABLISHED;
941 	/* reaped_evt->event == RDMA_CM_EVENT_REJECTED and reaped_evt->status == 10, expect: fail */
942 	reaped_evt.event = RDMA_CM_EVENT_REJECTED;
943 	reaped_evt.status = 10;
944 
945 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
946 	CU_ASSERT(rc == -ESTALE);
947 
948 	/* reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE, expect: pass */
949 	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
950 
951 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
952 	CU_ASSERT(rc == 0);
953 }
954 
955 static void
956 test_nvme_rdma_qpair_init(void)
957 {
958 	struct nvme_rdma_qpair		rqpair = {};
959 	struct rdma_cm_id		cm_id = {};
960 	struct ibv_pd			*pd = (struct ibv_pd *)0xfeedbeef;
961 	struct ibv_qp			qp = { .pd = pd };
962 	struct nvme_rdma_ctrlr		rctrlr = {};
963 	int				rc = 0;
964 
965 	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
966 	rqpair.cm_id = &cm_id;
967 	g_nvme_hooks.get_ibv_pd = NULL;
968 	rqpair.qpair.poll_group = NULL;
969 	rqpair.qpair.ctrlr = &rctrlr.ctrlr;
970 	g_spdk_rdma_qp.qp = &qp;
971 	MOCK_SET(spdk_rdma_utils_get_pd, pd);
972 
973 	rc = nvme_rdma_qpair_init(&rqpair);
974 	CU_ASSERT(rc == 0);
975 
976 	CU_ASSERT(rqpair.cm_id->context == &rqpair.qpair);
977 	CU_ASSERT(rqpair.max_send_sge == NVME_RDMA_DEFAULT_TX_SGE);
978 	CU_ASSERT(rqpair.current_num_sends == 0);
979 	CU_ASSERT(rqpair.cq == (struct ibv_cq *)0xFEEDBEEF);
980 
981 	MOCK_CLEAR(spdk_rdma_utils_get_pd);
982 }
983 
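/*
 * test_nvme_rdma_qpair_submit_request:
 * A successful submit takes an rdma_req from the free list, clears any stale
 * send_wr.next chaining, bumps current_num_sends and places the request on
 * outstanding_reqs; once the single entry is in use, a further submit returns -EAGAIN
 * and is counted in the poller's queued_requests statistic.
 */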
984 static void
985 test_nvme_rdma_qpair_submit_request(void)
986 {
987 	int				rc;
988 	struct nvme_rdma_qpair		rqpair = {};
989 	struct spdk_nvme_ctrlr		ctrlr = {};
990 	struct nvme_request		req = {};
991 	struct nvme_rdma_poller		poller = {};
992 	struct spdk_nvme_rdma_req	*rdma_req = NULL;
993 
994 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
995 	req.payload = NVME_PAYLOAD_CONTIG((void *)0xdeadbeef, NULL);
996 	req.payload_size = 0;
997 	rqpair.mr_map = (struct spdk_rdma_utils_mem_map *)0xdeadbeef;
998 	rqpair.rdma_qp = (struct spdk_rdma_provider_qp *)0xdeadbeef;
999 	rqpair.qpair.ctrlr = &ctrlr;
1000 	rqpair.num_entries = 1;
1001 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1002 	rqpair.poller = &poller;
1003 
1004 	rc = nvme_rdma_create_reqs(&rqpair);
1005 	CU_ASSERT(rc == 0);
1006 	/* Give send_wr.next a non-NULL value */
1007 	rdma_req = TAILQ_FIRST(&rqpair.free_reqs);
1008 	SPDK_CU_ASSERT_FATAL(rdma_req != NULL);
1009 	rdma_req->send_wr.next = (void *)0xdeadbeef;
1010 
1011 	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
1012 	CU_ASSERT(rc == 0);
1013 	CU_ASSERT(rqpair.current_num_sends == 1);
1014 	CU_ASSERT(rdma_req->send_wr.next == NULL);
1015 	TAILQ_REMOVE(&rqpair.outstanding_reqs, rdma_req, link);
1016 	CU_ASSERT(TAILQ_EMPTY(&rqpair.outstanding_reqs));
1017 
1018 	/* No request available */
1019 	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
1020 	CU_ASSERT(rc == -EAGAIN);
1021 	CU_ASSERT(rqpair.poller->stats.queued_requests == 1);
1022 
1023 	nvme_rdma_free_reqs(&rqpair);
1024 }
1025 
1026 static void
1027 test_rdma_ctrlr_get_memory_domains(void)
1028 {
1029 	struct nvme_rdma_ctrlr rctrlr = {};
1030 	struct nvme_rdma_qpair rqpair = {};
1031 	struct spdk_rdma_provider_qp rdma_qp = {};
1032 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
1033 	struct spdk_memory_domain *domains[1] = {NULL};
1034 
1035 	rdma_qp.domain = domain;
1036 	rqpair.rdma_qp = &rdma_qp;
1037 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1038 	rctrlr.ctrlr.adminq = &rqpair.qpair;
1039 
1040 	/* Test 1, input domains pointer is NULL */
1041 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 1) == 1);
1042 
1043 	/* Test 2, input array_size is 0 */
1044 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 0) == 1);
1045 	CU_ASSERT(domains[0] == NULL);
1046 
1047 	/* Test 3, both input domains pointer and array_size are NULL/0 */
1048 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 0) == 1);
1049 
1050 	/* Test 4, input parameters are valid */
1051 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 1) == 1);
1052 	CU_ASSERT(domains[0] == domain);
1053 }
1054 
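/*
 * test_rdma_get_memory_translation:
 * When the request carries extended I/O options with a memory domain, the translation
 * goes through spdk_memory_domain_translate_data() and the returned iov and keys replace
 * the original address, length, lkey and rkey. Without such options the registered
 * memory map is consulted instead, via the spdk_rdma_utils_get_translation() mock
 * provided by the included test_rdma.c.
 */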
1055 static void
1056 test_rdma_get_memory_translation(void)
1057 {
1058 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *) 0xfeedbeef;
1059 	struct ibv_qp qp = {.pd = (struct ibv_pd *) 0xfeedbeef};
1060 	struct spdk_rdma_provider_qp rdma_qp = {.qp = &qp, .domain = domain};
1061 	struct nvme_rdma_qpair rqpair = {.rdma_qp = &rdma_qp};
1062 	struct spdk_nvme_ns_cmd_ext_io_opts io_opts = {.memory_domain = domain};
1063 	struct nvme_request req = {.payload = {.opts = &io_opts}};
1064 	struct nvme_rdma_memory_translation_ctx ctx = {
1065 		.addr = (void *) 0xBAADF00D,
1066 		.length = 0x100
1067 	};
1068 	int rc;
1069 
1070 	/* case 1, using extended IO opts with DMA device.
1071 	 * Test 1 - spdk_memory_domain_translate_data error, expect fail */
1072 	MOCK_SET(spdk_memory_domain_translate_data, -1);
1073 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1074 	CU_ASSERT(rc != 0);
1075 	MOCK_CLEAR(spdk_memory_domain_translate_data);
1076 
1077 	/* Test 2 - expect pass */
1078 	g_memory_translation_translation.iov_count = 1;
1079 	g_memory_translation_translation.iov.iov_base = ctx.addr + 1;
1080 	g_memory_translation_translation.iov.iov_len = ctx.length;
1081 	g_memory_translation_translation.rdma.lkey = 123;
1082 	g_memory_translation_translation.rdma.rkey = 321;
1083 
1084 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1085 	CU_ASSERT(rc == 0);
1086 	CU_ASSERT(ctx.lkey == g_memory_translation_translation.rdma.lkey);
1087 	CU_ASSERT(ctx.rkey == g_memory_translation_translation.rdma.rkey);
1088 	CU_ASSERT(ctx.addr == g_memory_translation_translation.iov.iov_base);
1089 	CU_ASSERT(ctx.length == g_memory_translation_translation.iov.iov_len);
1090 
1091 	/* case 2, using rdma translation
1092 	 * Test 1 - spdk_rdma_utils_get_translation error, expect fail */
1093 	req.payload.opts = NULL;
1094 	MOCK_SET(spdk_rdma_utils_get_translation, -1);
1095 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1096 	CU_ASSERT(rc != 0);
1097 	MOCK_CLEAR(spdk_rdma_utils_get_translation);
1098 
1099 	/* Test 2 - expect pass */
1100 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1101 	CU_ASSERT(rc == 0);
1102 	CU_ASSERT(ctx.lkey == RDMA_UT_LKEY);
1103 	CU_ASSERT(ctx.rkey == RDMA_UT_RKEY);
1104 }
1105 
1106 static void
1107 test_get_rdma_qpair_from_wc(void)
1108 {
1109 	const uint32_t test_qp_num = 123;
1110 	struct nvme_rdma_poll_group	group = {};
1111 	struct nvme_rdma_qpair rqpair = {};
1112 	struct spdk_rdma_provider_qp rdma_qp = {};
1113 	struct ibv_qp qp = { .qp_num = test_qp_num };
1114 	struct ibv_wc wc = { .qp_num = test_qp_num };
1115 
1116 	STAILQ_INIT(&group.group.disconnected_qpairs);
1117 	STAILQ_INIT(&group.group.connected_qpairs);
1118 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1119 
1120 	/* Test 1 - Simulate a qpair that is still on one of the lists but whose rdma_qp has
1121 	 * not been set up (e.g. already torn down). get_rdma_qpair_from_wc must return NULL */
1122 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
1123 	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
1124 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
1125 
1126 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
1127 	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == NULL);
1128 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
1129 
1130 	/* Test 2 - nvme_rdma_qpair with valid rdma_qp/ibv_qp and qp_num */
1131 	rdma_qp.qp = &qp;
1132 	rqpair.rdma_qp = &rdma_qp;
1133 
1134 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
1135 	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
1136 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
1137 
1138 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
1139 	CU_ASSERT(get_rdma_qpair_from_wc(&group, &wc) == &rqpair);
1140 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
1141 }
1142 
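/*
 * test_nvme_rdma_ctrlr_get_max_sges:
 * As the cases below show, the SGE limit is the controller's msdbd capped by the
 * transport maximum and by how many 16-byte SGL descriptors fit in the in-capsule space
 * implied by ioccsz (reported in 16-byte units), with a floor of one descriptor.
 * Roughly (a sketch of the relationship, not the exact implementation):
 * spdk_min(msdbd, max_sge,
 *          (ioccsz * 16 - sizeof(struct spdk_nvme_cmd)) / sizeof(struct spdk_nvme_sgl_descriptor)),
 * floored at 1.
 */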
1143 static void
1144 test_nvme_rdma_ctrlr_get_max_sges(void)
1145 {
1146 	struct nvme_rdma_ctrlr	rctrlr = {};
1147 
1148 	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1149 	rctrlr.max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
1150 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1151 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1152 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
1153 
1154 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 32;
1155 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1156 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
1157 
1158 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 8;
1159 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1160 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 8);
1161 
1162 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1163 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4;
1164 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 1);
1165 
1166 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1167 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 6;
1168 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 2);
1169 }
1170 
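/*
 * test_nvme_rdma_poll_group_get_stats:
 * Pollers are pushed onto the group list head-first, so the poller created last
 * ("/dev/test2" here) is reported as device_stats[0]. The test checks that every counter
 * a poller accumulates is copied verbatim into the per-device stats and that NULL
 * group/stats pointers are rejected with -EINVAL.
 */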
1171 static void
1172 test_nvme_rdma_poll_group_get_stats(void)
1173 {
1174 	int rc = -1;
1175 	struct spdk_nvme_transport_poll_group_stat *tpointer = NULL;
1176 	struct nvme_rdma_poll_group tgroup = {};
1177 	struct ibv_device dev1 = {}, dev2 = {};
1178 	struct ibv_context contexts1 = {}, contexts2 = {};
1179 	struct nvme_rdma_poller *tpoller1 = NULL;
1180 	struct nvme_rdma_poller *tpoller2 = NULL;
1181 
1182 	memcpy(dev1.name, "/dev/test1", sizeof("/dev/test1"));
1183 	memcpy(dev2.name, "/dev/test2", sizeof("/dev/test2"));
1184 	contexts1.device = &dev1;
1185 	contexts2.device = &dev2;
1186 
1187 	/* Initialization */
1188 	STAILQ_INIT(&tgroup.pollers);
1189 	tpoller2 = nvme_rdma_poller_create(&tgroup, &contexts1);
1190 	SPDK_CU_ASSERT_FATAL(tpoller2 != NULL);
1191 	CU_ASSERT(tgroup.num_pollers == 1);
1192 
1193 	tpoller1 = nvme_rdma_poller_create(&tgroup, &contexts2);
1194 	SPDK_CU_ASSERT_FATAL(tpoller1 != NULL);
1195 	CU_ASSERT(tgroup.num_pollers == 2);
1196 	CU_ASSERT(&tgroup.pollers != NULL);
1197 
1198 	CU_ASSERT(tpoller1->device == &contexts2);
1199 	CU_ASSERT(tpoller2->device == &contexts1);
1200 	CU_ASSERT(strcmp(tpoller1->device->device->name, "/dev/test2") == 0);
1201 	CU_ASSERT(strcmp(tpoller2->device->device->name, "/dev/test1") == 0);
1202 	CU_ASSERT(tpoller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1203 	CU_ASSERT(tpoller2->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1204 	CU_ASSERT(tpoller1->required_num_wc == 0);
1205 	CU_ASSERT(tpoller2->required_num_wc == 0);
1206 
1207 	/* Test1: Invalid group pointer (NULL) */
1208 	rc = nvme_rdma_poll_group_get_stats(NULL, &tpointer);
1209 	CU_ASSERT(rc == -EINVAL);
1210 
1211 	/* Test2: Invalid stats pointer (NULL) */
1212 	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, NULL);
1213 	CU_ASSERT(rc == -EINVAL);
1214 
1215 	/* Test3: Success, returned per-device stats must match the pollers' counters */
1216 	tpoller1->stats.polls = 111;
1217 	tpoller1->stats.idle_polls = 112;
1218 	tpoller1->stats.completions = 113;
1219 	tpoller1->stats.queued_requests = 114;
1220 	tpoller1->stats.rdma_stats.send.num_submitted_wrs = 121;
1221 	tpoller1->stats.rdma_stats.send.doorbell_updates = 122;
1222 	tpoller1->stats.rdma_stats.recv.num_submitted_wrs = 131;
1223 	tpoller1->stats.rdma_stats.recv.doorbell_updates = 132;
1224 	tpoller2->stats.polls = 211;
1225 	tpoller2->stats.idle_polls = 212;
1226 	tpoller2->stats.completions = 213;
1227 	tpoller2->stats.queued_requests = 214;
1228 	tpoller2->stats.rdma_stats.send.num_submitted_wrs = 221;
1229 	tpoller2->stats.rdma_stats.send.doorbell_updates = 222;
1230 	tpoller2->stats.rdma_stats.recv.num_submitted_wrs = 231;
1231 	tpoller2->stats.rdma_stats.recv.doorbell_updates = 232;
1232 
1233 	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, &tpointer);
1234 	CU_ASSERT(rc == 0);
1235 	CU_ASSERT(tpointer != NULL);
1236 	CU_ASSERT(tpointer->trtype == SPDK_NVME_TRANSPORT_RDMA);
1237 	CU_ASSERT(tpointer->rdma.num_devices == tgroup.num_pollers);
1238 	CU_ASSERT(tpointer->rdma.device_stats != NULL);
1239 
1240 	CU_ASSERT(strcmp(tpointer->rdma.device_stats[0].name, "/dev/test2") == 0);
1241 	CU_ASSERT(tpointer->rdma.device_stats[0].polls == 111);
1242 	CU_ASSERT(tpointer->rdma.device_stats[0].idle_polls == 112);
1243 	CU_ASSERT(tpointer->rdma.device_stats[0].completions == 113);
1244 	CU_ASSERT(tpointer->rdma.device_stats[0].queued_requests == 114);
1245 	CU_ASSERT(tpointer->rdma.device_stats[0].total_send_wrs == 121);
1246 	CU_ASSERT(tpointer->rdma.device_stats[0].send_doorbell_updates == 122);
1247 	CU_ASSERT(tpointer->rdma.device_stats[0].total_recv_wrs == 131);
1248 	CU_ASSERT(tpointer->rdma.device_stats[0].recv_doorbell_updates == 132);
1249 
1250 	CU_ASSERT(strcmp(tpointer->rdma.device_stats[1].name, "/dev/test1") == 0);
1251 	CU_ASSERT(tpointer->rdma.device_stats[1].polls == 211);
1252 	CU_ASSERT(tpointer->rdma.device_stats[1].idle_polls == 212);
1253 	CU_ASSERT(tpointer->rdma.device_stats[1].completions == 213);
1254 	CU_ASSERT(tpointer->rdma.device_stats[1].queued_requests == 214);
1255 	CU_ASSERT(tpointer->rdma.device_stats[1].total_send_wrs == 221);
1256 	CU_ASSERT(tpointer->rdma.device_stats[1].send_doorbell_updates == 222);
1257 	CU_ASSERT(tpointer->rdma.device_stats[1].total_recv_wrs == 231);
1258 	CU_ASSERT(tpointer->rdma.device_stats[1].recv_doorbell_updates == 232);
1259 
1260 	nvme_rdma_poll_group_free_stats(&tgroup.group, tpointer);
1261 	nvme_rdma_poll_group_free_pollers(&tgroup);
1262 }
1263 
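/*
 * test_nvme_rdma_qpair_set_poller:
 * A qpair added to a poll group gets (or shares) a poller keyed by its ibv_context. The
 * poller's CQ starts at DEFAULT_NVME_RDMA_CQ_SIZE and is grown with ibv_resize_cq() when
 * a qpair needs more completion entries; CQ creation or resize failures must leave the
 * group's poller list empty.
 */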
1264 static void
1265 test_nvme_rdma_qpair_set_poller(void)
1266 {
1267 	int rc = -1;
1268 	struct nvme_rdma_poll_group *group;
1269 	struct spdk_nvme_transport_poll_group *tgroup;
1270 	struct nvme_rdma_poller *poller;
1271 	struct nvme_rdma_qpair rqpair = {};
1272 	struct rdma_cm_id cm_id = {};
1273 
1274 	/* Case1: Test function nvme_rdma_poll_group_create */
1275 	/* Test1: Function nvme_rdma_poll_group_create success */
1276 	tgroup = nvme_rdma_poll_group_create();
1277 	SPDK_CU_ASSERT_FATAL(tgroup != NULL);
1278 
1279 	group = nvme_rdma_poll_group(tgroup);
1280 	CU_ASSERT(group != NULL);
1281 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1282 
1283 	/* Case2: Test function nvme_rdma_qpair_set_poller */
1284 	rqpair.qpair.poll_group = tgroup;
1285 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1286 	rqpair.cm_id = &cm_id;
1287 
1288 	/* Test1: Function ibv_create_cq failed */
1289 	cm_id.verbs = (void *)0xFEEDBEEF;
1290 	MOCK_SET(ibv_create_cq, NULL);
1291 
1292 	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
1293 	CU_ASSERT(rc == -EINVAL);
1294 	CU_ASSERT(rqpair.cq == NULL);
1295 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1296 
1297 	MOCK_CLEAR(ibv_create_cq);
1298 
1299 	/* Test2: Unable to find a cq for qpair on poll group */
1300 	cm_id.verbs = NULL;
1301 
1302 	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
1303 	CU_ASSERT(rc == -EINVAL);
1304 	CU_ASSERT(rqpair.cq == NULL);
1305 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1306 
1307 	/* Test3: CQ assigned successfully, current_num_wc is sufficient */
1308 	MOCK_SET(ibv_create_cq, (struct ibv_cq *)0xFEEDBEEF);
1309 
1310 	cm_id.verbs = (void *)0xFEEDBEEF;
1311 	rqpair.num_entries = 0;
1312 
1313 	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
1314 	CU_ASSERT(rc == 0);
1315 	CU_ASSERT(rqpair.cq == (void *)0xFEEDBEEF);
1316 
1317 	poller = STAILQ_FIRST(&group->pollers);
1318 	SPDK_CU_ASSERT_FATAL(poller != NULL);
1319 	CU_ASSERT(STAILQ_NEXT(poller, link) == NULL);
1320 	CU_ASSERT(poller->device == (struct ibv_context *)0xFEEDBEEF);
1321 	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1322 	CU_ASSERT(poller->required_num_wc == 0);
1323 	CU_ASSERT(rqpair.poller == poller);
1324 
1325 	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;
1326 
1327 	nvme_rdma_poll_group_put_poller(group, rqpair.poller);
1328 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1329 
1330 	rqpair.qpair.poll_group_tailq_head = &tgroup->connected_qpairs;
1331 
1332 	/* Test4: CQ assigned, but ibv_resize_cq() fails when more entries are required */
1333 	rqpair.cq = NULL;
1334 	rqpair.num_entries = DEFAULT_NVME_RDMA_CQ_SIZE - 1;
1335 	MOCK_SET(ibv_resize_cq, -1);
1336 
1337 	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
1338 	CU_ASSERT(rc == -EPROTO);
1339 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1340 
1341 	/* Test5: current_num_wc is not enough, ibv_resize_cq() succeeds and doubles it */
1342 	MOCK_SET(ibv_resize_cq, 0);
1343 
1344 	rc = nvme_rdma_qpair_set_poller(&rqpair.qpair);
1345 	CU_ASSERT(rc == 0);
1346 
1347 	poller = STAILQ_FIRST(&group->pollers);
1348 	SPDK_CU_ASSERT_FATAL(poller != NULL);
1349 	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE * 2);
1350 	CU_ASSERT(poller->required_num_wc == (DEFAULT_NVME_RDMA_CQ_SIZE - 1) * 2);
1351 	CU_ASSERT(rqpair.cq == poller->cq);
1352 	CU_ASSERT(rqpair.poller == poller);
1353 
1354 	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;
1355 
1356 	nvme_rdma_poll_group_put_poller(group, rqpair.poller);
1357 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1358 
1359 	rc = nvme_rdma_poll_group_destroy(tgroup);
1360 	CU_ASSERT(rc == 0);
1361 }
1362 
1363 int
1364 main(int argc, char **argv)
1365 {
1366 	CU_pSuite	suite = NULL;
1367 	unsigned int	num_failures;
1368 
1369 	CU_initialize_registry();
1370 
1371 	suite = CU_add_suite("nvme_rdma", NULL, NULL);
1372 	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
1373 	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
1374 	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
1375 	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
1376 	CU_ADD_TEST(suite, test_nvme_rdma_create_reqs);
1377 	CU_ADD_TEST(suite, test_nvme_rdma_create_rsps);
1378 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
1379 	CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
1380 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
1381 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_construct);
1382 	CU_ADD_TEST(suite, test_nvme_rdma_req_put_and_get);
1383 	CU_ADD_TEST(suite, test_nvme_rdma_req_init);
1384 	CU_ADD_TEST(suite, test_nvme_rdma_validate_cm_event);
1385 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
1386 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
1387 	CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domains);
1388 	CU_ADD_TEST(suite, test_rdma_get_memory_translation);
1389 	CU_ADD_TEST(suite, test_get_rdma_qpair_from_wc);
1390 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_get_max_sges);
1391 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_stats);
1392 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_set_poller);
1393 
1394 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
1395 	CU_cleanup_registry();
1396 	return num_failures;
1397 }
1398