xref: /spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c (revision 18c8b52afa69f39481ebb75711b2f30b11693f9d)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation. All rights reserved.
3  *   Copyright (c) 2021, 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4  */
5 
6 #include "spdk/stdinc.h"
7 #include "spdk_cunit.h"
8 #include "nvme/nvme_rdma.c"
9 #include "common/lib/nvme/common_stubs.h"
10 #include "common/lib/test_rdma.c"
11 
12 SPDK_LOG_REGISTER_COMPONENT(nvme)
13 
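/*
 * nvme_rdma.c is compiled directly into this test via the #include above, so
 * the stubs below provide the external symbols it references but that these
 * tests do not need real implementations of.
 */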
14 DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
15 		uint64_t size, uint64_t translation), 0);
16 DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
17 		uint64_t size), 0);
18 
19 DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
20 		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
21 DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
22 
23 DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
24 
25 DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
26 DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
27 		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
28 
29 DEFINE_STUB(rdma_ack_cm_event, int, (struct rdma_cm_event *event), 0);
30 DEFINE_STUB_V(rdma_free_devices, (struct ibv_context **list));
31 DEFINE_STUB(fcntl, int, (int fd, int cmd, ...), 0);
32 DEFINE_STUB_V(rdma_destroy_event_channel, (struct rdma_event_channel *channel));
33 
34 DEFINE_STUB(ibv_dereg_mr, int, (struct ibv_mr *mr), 0);
35 DEFINE_STUB(ibv_resize_cq, int, (struct ibv_cq *cq, int cqe), 0);
36 
37 DEFINE_STUB(spdk_memory_domain_get_context, struct spdk_memory_domain_ctx *,
38 	    (struct spdk_memory_domain *device), NULL);
39 DEFINE_STUB(spdk_memory_domain_get_dma_device_type, enum spdk_dma_device_type,
40 	    (struct spdk_memory_domain *device), SPDK_DMA_DEVICE_TYPE_RDMA);
41 DEFINE_STUB_V(spdk_memory_domain_destroy, (struct spdk_memory_domain *device));
42 DEFINE_STUB(spdk_memory_domain_pull_data, int, (struct spdk_memory_domain *src_domain,
43 		void *src_domain_ctx, struct iovec *src_iov, uint32_t src_iov_cnt, struct iovec *dst_iov,
44 		uint32_t dst_iov_cnt, spdk_memory_domain_data_cpl_cb cpl_cb, void *cpl_cb_arg), 0);
45 
46 DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
47 		struct spdk_nvme_cmd *cmd));
48 
49 DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
50 		struct spdk_nvme_cpl *cpl));
51 
52 DEFINE_RETURN_MOCK(spdk_memory_domain_create, int);
53 int
54 spdk_memory_domain_create(struct spdk_memory_domain **domain, enum spdk_dma_device_type type,
55 			  struct spdk_memory_domain_ctx *ctx, const char *id)
56 {
57 	static struct spdk_memory_domain *__dma_dev = (struct spdk_memory_domain *)0xdeaddead;
58 
59 	HANDLE_RETURN_MOCK(spdk_memory_domain_create);
60 
61 	*domain = __dma_dev;
62 
63 	return 0;
64 }
65 
66 static struct spdk_memory_domain_translation_result g_memory_translation_translation = {.size = sizeof(struct spdk_memory_domain_translation_result) };
67 
68 DEFINE_RETURN_MOCK(spdk_memory_domain_translate_data, int);
69 int
70 spdk_memory_domain_translate_data(struct spdk_memory_domain *src_domain, void *src_domain_ctx,
71 				  struct spdk_memory_domain *dst_domain, struct spdk_memory_domain_translation_ctx *dst_domain_ctx,
72 				  void *addr, size_t len, struct spdk_memory_domain_translation_result *result)
73 {
74 
75 	HANDLE_RETURN_MOCK(spdk_memory_domain_translate_data);
76 
77 	memcpy(result, &g_memory_translation_translation, sizeof(g_memory_translation_translation));
78 
79 	return 0;
80 }
81 
82 /* ibv_reg_mr can be a macro, need to undefine it */
83 #ifdef ibv_reg_mr
84 #undef ibv_reg_mr
85 #endif
86 
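/*
 * Mocked ibv_reg_mr: after honoring any MOCK_SET() override, it returns the
 * shared test MR for any non-zero length and NULL for a zero-length region.
 */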
87 DEFINE_RETURN_MOCK(ibv_reg_mr, struct ibv_mr *);
88 struct ibv_mr *
89 ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access)
90 {
91 	HANDLE_RETURN_MOCK(ibv_reg_mr);
92 	if (length > 0) {
93 		return &g_rdma_mr;
94 	} else {
95 		return NULL;
96 	}
97 }
98 
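/*
 * Minimal stand-in for a bdev I/O: it carries the iovec array that the
 * reset_sgl/next_sge callbacks below iterate over.
 */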
99 struct nvme_rdma_ut_bdev_io {
100 	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
101 	int iovpos;
102 	int iovcnt;
103 };
104 
105 DEFINE_RETURN_MOCK(rdma_get_devices, struct ibv_context **);
106 struct ibv_context **
107 rdma_get_devices(int *num_devices)
108 {
109 	static struct ibv_context *_contexts[] = {
110 		(struct ibv_context *)0xDEADBEEF,
111 		(struct ibv_context *)0xFEEDBEEF,
112 		NULL
113 	};
114 
115 	HANDLE_RETURN_MOCK(rdma_get_devices);
116 	return _contexts;
117 }
118 
119 DEFINE_RETURN_MOCK(rdma_create_event_channel, struct rdma_event_channel *);
120 struct rdma_event_channel *
121 rdma_create_event_channel(void)
122 {
123 	HANDLE_RETURN_MOCK(rdma_create_event_channel);
124 	return NULL;
125 }
126 
127 DEFINE_RETURN_MOCK(ibv_query_device, int);
128 int
129 ibv_query_device(struct ibv_context *context,
130 		 struct ibv_device_attr *device_attr)
131 {
132 	if (device_attr) {
133 		device_attr->max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
134 	}
135 	HANDLE_RETURN_MOCK(ibv_query_device);
136 
137 	return 0;
138 }
139 
140 /* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
141 static void
142 nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
143 {
144 	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
145 	struct iovec *iov;
146 
147 	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
148 		iov = &bio->iovs[bio->iovpos];
149 		/* Only provide offsets at the beginning of an iov */
150 		if (offset == 0) {
151 			break;
152 		}
153 
154 		offset -= iov->iov_len;
155 	}
156 
157 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
158 }
159 
160 static int
161 nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
162 {
163 	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
164 	struct iovec *iov;
165 
166 	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
167 
168 	if (bio->iovpos == bio->iovcnt) {
169 		return -1;
170 	}
171 
172 	iov = &bio->iovs[bio->iovpos];
173 
174 	*address = iov->iov_base;
175 	*length = iov->iov_len;
176 	bio->iovpos++;
177 
178 	return 0;
179 }
180 
181 static void
182 test_nvme_rdma_build_sgl_request(void)
183 {
184 	struct nvme_rdma_qpair rqpair;
185 	struct spdk_nvme_ctrlr ctrlr = {0};
186 	struct spdk_nvmf_cmd cmd = {{0}};
187 	struct spdk_nvme_rdma_req rdma_req = {0};
188 	struct nvme_request req = {{0}};
189 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
190 	uint64_t i;
191 	int rc;
192 
193 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
194 	ctrlr.cdata.nvmf_specific.msdbd = 16;
195 	ctrlr.ioccsz_bytes = 4096;
196 
197 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
198 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
199 	rqpair.qpair.ctrlr = &ctrlr;
200 	rqpair.cmds = &cmd;
201 	cmd.sgl[0].address = 0x1111;
202 	rdma_req.id = 0;
203 	rdma_req.req = &req;
204 
205 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
206 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
207 	req.payload.contig_or_cb_arg = &bio;
208 	req.qpair = &rqpair.qpair;
209 
210 	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
211 		bio.iovs[i].iov_base = (void *)(i + 1);
212 		bio.iovs[i].iov_len = 0;
213 	}
214 
215 	/* Test case 1: single SGL. Expected: PASS */
216 	bio.iovpos = 0;
217 	req.payload_offset = 0;
218 	req.payload_size = 0x1000;
219 	bio.iovs[0].iov_len = 0x1000;
220 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
221 	SPDK_CU_ASSERT_FATAL(rc == 0);
222 	CU_ASSERT(bio.iovpos == 1);
223 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
224 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
225 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
226 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
227 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
228 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
229 
230 	/* Test case 2: multiple SGL. Expected: PASS */
231 	bio.iovpos = 0;
232 	req.payload_offset = 0;
233 	req.payload_size = 0x4000;
234 	for (i = 0; i < 4; i++) {
235 		bio.iovs[i].iov_len = 0x1000;
236 	}
237 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
238 	SPDK_CU_ASSERT_FATAL(rc == 0);
239 	CU_ASSERT(bio.iovpos == 4);
240 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
241 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
242 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
243 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
244 	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
245 			  struct spdk_nvme_cmd));
246 	for (i = 0; i < 4; i++) {
247 		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
248 		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
249 		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
250 		CU_ASSERT(cmd.sgl[i].keyed.key == RDMA_UT_RKEY);
251 		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
252 	}
253 
254 	/* Test case 3: Multiple SGLs, each SGL entry is 2x the MR size. Expected: FAIL */
255 	bio.iovpos = 0;
256 	req.payload_offset = 0;
257 	g_mr_size = 0x800;
258 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
259 	SPDK_CU_ASSERT_FATAL(rc != 0);
260 	CU_ASSERT(bio.iovpos == 1);
261 
262 	/* Test case 4: Multiple SGLs, total SGL size smaller than the I/O size. Expected: FAIL */
263 	bio.iovpos = 0;
264 	bio.iovcnt = 4;
265 	req.payload_offset = 0;
266 	req.payload_size = 0x6000;
267 	g_mr_size = 0x0;
268 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
269 	SPDK_CU_ASSERT_FATAL(rc != 0);
270 	CU_ASSERT(bio.iovpos == bio.iovcnt);
271 	bio.iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS;
272 
273 	/* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
274 	req.payload_size = 0x1000 + (1 << 24);
275 	bio.iovs[0].iov_len = 0x1000;
276 	bio.iovs[1].iov_len = 1 << 24;
277 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
278 	SPDK_CU_ASSERT_FATAL(rc != 0);
279 
280 	/* Test case 6: 4 SGL descriptors, size of SGL descriptors exceeds ICD. Expected: FAIL */
281 	ctrlr.ioccsz_bytes = 60;
282 	bio.iovpos = 0;
283 	req.payload_offset = 0;
284 	req.payload_size = 0x4000;
285 	for (i = 0; i < 4; i++) {
286 		bio.iovs[i].iov_len = 0x1000;
287 	}
288 	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
289 	SPDK_CU_ASSERT_FATAL(rc == -1);
290 }
291 
292 static void
293 test_nvme_rdma_build_sgl_inline_request(void)
294 {
295 	struct nvme_rdma_qpair rqpair;
296 	struct spdk_nvme_ctrlr ctrlr = {0};
297 	struct spdk_nvmf_cmd cmd = {{0}};
298 	struct spdk_nvme_rdma_req rdma_req = {0};
299 	struct nvme_request req = {{0}};
300 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
301 	int rc;
302 
303 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
304 	ctrlr.cdata.nvmf_specific.msdbd = 16;
305 
306 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
307 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
308 	rqpair.qpair.ctrlr = &ctrlr;
309 	rqpair.cmds = &cmd;
310 	cmd.sgl[0].address = 0x1111;
311 	rdma_req.id = 0;
312 	rdma_req.req = &req;
313 
314 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
315 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
316 	req.payload.contig_or_cb_arg = &bio;
317 	req.qpair = &rqpair.qpair;
318 
319 	/* Test case 1: single inline SGL. Expected: PASS */
320 	bio.iovpos = 0;
321 	req.payload_offset = 0;
322 	req.payload_size = 0x1000;
323 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
324 	bio.iovs[0].iov_len = 0x1000;
325 	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
326 	SPDK_CU_ASSERT_FATAL(rc == 0);
327 	CU_ASSERT(bio.iovpos == 1);
328 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
329 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
330 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
331 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
332 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
333 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
334 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
335 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
336 
337 	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
338 	bio.iovpos = 0;
339 	req.payload_offset = 0;
340 	req.payload_size = 1 << 24;
341 	bio.iovs[0].iov_len = 1 << 24;
342 	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
343 	SPDK_CU_ASSERT_FATAL(rc == 0);
344 	CU_ASSERT(bio.iovpos == 1);
345 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
346 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
347 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
348 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
349 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
350 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
351 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
352 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
353 }
354 
355 static void
356 test_nvme_rdma_build_contig_request(void)
357 {
358 	struct nvme_rdma_qpair rqpair;
359 	struct spdk_nvme_ctrlr ctrlr = {0};
360 	struct spdk_nvmf_cmd cmd = {{0}};
361 	struct spdk_nvme_rdma_req rdma_req = {0};
362 	struct nvme_request req = {{0}};
363 	int rc;
364 
365 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
366 	ctrlr.cdata.nvmf_specific.msdbd = 16;
367 
368 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
369 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
370 	rqpair.qpair.ctrlr = &ctrlr;
371 	rqpair.cmds = &cmd;
372 	cmd.sgl[0].address = 0x1111;
373 	rdma_req.id = 0;
374 	rdma_req.req = &req;
375 
376 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
377 	req.qpair = &rqpair.qpair;
378 
379 	/* Test case 1: contig request. Expected: PASS */
380 	req.payload_offset = 0;
381 	req.payload_size = 0x1000;
382 	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
383 	SPDK_CU_ASSERT_FATAL(rc == 0);
384 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
385 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
386 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
387 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
388 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
389 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
390 
391 	/* Test case 2: SGL length exceeds 3 bytes. Expected: FAIL */
392 	req.payload_offset = 0;
393 	req.payload_size = 1 << 24;
394 	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
395 	SPDK_CU_ASSERT_FATAL(rc != 0);
396 }
397 
398 static void
399 test_nvme_rdma_build_contig_inline_request(void)
400 {
401 	struct nvme_rdma_qpair rqpair;
402 	struct spdk_nvme_ctrlr ctrlr = {0};
403 	struct spdk_nvmf_cmd cmd = {{0}};
404 	struct spdk_nvme_rdma_req rdma_req = {0};
405 	struct nvme_request req = {{0}};
406 	int rc;
407 
408 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
409 	ctrlr.cdata.nvmf_specific.msdbd = 16;
410 
411 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
412 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
413 	rqpair.qpair.ctrlr = &ctrlr;
414 	rqpair.cmds = &cmd;
415 	cmd.sgl[0].address = 0x1111;
416 	rdma_req.id = 0;
417 	rdma_req.req = &req;
418 
419 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
420 	req.qpair = &rqpair.qpair;
421 
422 	/* Test case 1: single inline SGL. Expected: PASS */
423 	req.payload_offset = 0;
424 	req.payload_size = 0x1000;
425 	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
426 	SPDK_CU_ASSERT_FATAL(rc == 0);
427 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
428 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
429 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
430 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
431 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
432 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
433 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
434 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
435 
436 	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
437 	req.payload_offset = 0;
438 	req.payload_size = 1 << 24;
439 	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
440 	SPDK_CU_ASSERT_FATAL(rc == 0);
441 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
442 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
443 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
444 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
445 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
446 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
447 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
448 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
449 }
450 
451 static void
452 test_nvme_rdma_alloc_reqs(void)
453 {
454 	struct nvme_rdma_qpair rqpair = {};
455 	int rc;
456 
457 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
458 
459 	/* Test case 1: zero entries. Expect: FAIL */
460 	rqpair.num_entries = 0;
461 
462 	rc = nvme_rdma_alloc_reqs(&rqpair);
463 	CU_ASSERT(rqpair.rdma_reqs == NULL);
464 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
465 
466 	/* Test case 2: single entry. Expect: PASS */
467 	memset(&rqpair, 0, sizeof(rqpair));
468 	rqpair.num_entries = 1;
469 
470 	rc = nvme_rdma_alloc_reqs(&rqpair);
471 	CU_ASSERT(rc == 0);
472 	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].addr
473 		  == (uint64_t)&rqpair.cmds[0]);
474 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.wr_id
475 		  == (uint64_t)&rqpair.rdma_reqs[0].rdma_wr);
476 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.next == NULL);
477 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.opcode == IBV_WR_SEND);
478 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.send_flags == IBV_SEND_SIGNALED);
479 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.sg_list
480 		  == rqpair.rdma_reqs[0].send_sgl);
481 	CU_ASSERT(rqpair.rdma_reqs[0].send_wr.imm_data == 0);
482 	spdk_free(rqpair.rdma_reqs);
483 	spdk_free(rqpair.cmds);
484 
485 	/* Test case 3: multiple entries. Expect: PASS */
486 	memset(&rqpair, 0, sizeof(rqpair));
487 	rqpair.num_entries = 5;
488 
489 	rc = nvme_rdma_alloc_reqs(&rqpair);
490 	CU_ASSERT(rc == 0);
491 	for (int i = 0; i < 5; i++) {
492 		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].addr
493 			  == (uint64_t)&rqpair.cmds[i]);
494 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.wr_id
495 			  == (uint64_t)&rqpair.rdma_reqs[i].rdma_wr);
496 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.next == NULL);
497 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.opcode == IBV_WR_SEND);
498 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.send_flags
499 			  == IBV_SEND_SIGNALED);
500 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.sg_list
501 			  == rqpair.rdma_reqs[i].send_sgl);
502 		CU_ASSERT(rqpair.rdma_reqs[i].send_wr.imm_data == 0);
503 	}
504 	spdk_free(rqpair.rdma_reqs);
505 	spdk_free(rqpair.cmds);
506 }
507 
508 static void
509 test_nvme_rdma_alloc_rsps(void)
510 {
511 	struct nvme_rdma_qpair rqpair = {};
512 	int rc;
513 
514 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
515 
516 	/* Test case 1: zero entries. Expect: FAIL (-ENOMEM) */
517 	rqpair.num_entries = 0;
518 	rc = nvme_rdma_alloc_rsps(&rqpair);
519 	CU_ASSERT(rqpair.rsp_sgls == NULL);
520 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
521 
522 	/* Test case 2: single entry. Expect: PASS */
523 	memset(&rqpair, 0, sizeof(rqpair));
524 	rqpair.num_entries = 1;
525 
526 	rc = nvme_rdma_alloc_rsps(&rqpair);
527 	CU_ASSERT(rc == 0);
528 	CU_ASSERT(rqpair.rsp_sgls != NULL);
529 	CU_ASSERT(rqpair.rsp_recv_wrs != NULL);
530 	CU_ASSERT(rqpair.rsps != NULL);
531 	nvme_rdma_free_rsps(&rqpair);
532 }
533 
534 static void
535 test_nvme_rdma_ctrlr_create_qpair(void)
536 {
537 	struct spdk_nvme_ctrlr ctrlr = {};
538 	uint16_t qid, qsize;
539 	struct spdk_nvme_qpair *qpair;
540 	struct nvme_rdma_qpair *rqpair;
541 
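	/*
	 * nvme_rdma_ctrlr_create_qpair() sizes the request pool to qsize - 1
	 * (an NVMe queue of depth N holds at most N - 1 outstanding commands),
	 * which is also why qsize values of 0 and 1 are rejected below.
	 */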
542 	/* Test case 1: max qsize. Expect: PASS */
543 	qsize = 0xffff;
544 	qid = 1;
545 
546 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
547 					     SPDK_NVME_QPRIO_URGENT, 1,
548 					     false, false);
549 	CU_ASSERT(qpair != NULL);
550 	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
551 	CU_ASSERT(qpair == &rqpair->qpair);
552 	CU_ASSERT(rqpair->num_entries == qsize - 1);
553 	CU_ASSERT(rqpair->delay_cmd_submit == false);
554 	CU_ASSERT(rqpair->rsp_sgls != NULL);
555 	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
556 	CU_ASSERT(rqpair->rsps != NULL);
557 
558 	nvme_rdma_free_reqs(rqpair);
559 	nvme_rdma_free_rsps(rqpair);
560 	nvme_rdma_free(rqpair);
561 	rqpair = NULL;
562 
563 	/* Test case 2: queue size 2. Expect: PASS */
564 	qsize = 2;
565 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
566 					     SPDK_NVME_QPRIO_URGENT, 1,
567 					     false, false);
568 	CU_ASSERT(qpair != NULL);
569 	rqpair = SPDK_CONTAINEROF(qpair, struct nvme_rdma_qpair, qpair);
570 	CU_ASSERT(rqpair->num_entries == qsize - 1);
571 	CU_ASSERT(rqpair->rsp_sgls != NULL);
572 	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
573 	CU_ASSERT(rqpair->rsps != NULL);
574 
575 	nvme_rdma_free_reqs(rqpair);
576 	nvme_rdma_free_rsps(rqpair);
577 	nvme_rdma_free(rqpair);
578 	rqpair = NULL;
579 
580 	/* Test case 3: queue size zero. Expect: FAIL */
581 	qsize = 0;
582 
583 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
584 					     SPDK_NVME_QPRIO_URGENT, 1,
585 					     false, false);
586 	SPDK_CU_ASSERT_FATAL(qpair == NULL);
587 
588 	/* Test case 4: queue size 1. Expect: FAIL */
589 	qsize = 1;
590 	qpair = nvme_rdma_ctrlr_create_qpair(&ctrlr, qid, qsize,
591 					     SPDK_NVME_QPRIO_URGENT, 1,
592 					     false, false);
593 	SPDK_CU_ASSERT_FATAL(qpair == NULL);
594 }
595 
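/*
 * ibv_create_cq/ibv_destroy_cq are stubbed here, right before the first test
 * that needs them; the fake CQ pointer 0xFEEDBEEF is what the poller and
 * poll-group tests check against.
 */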
596 DEFINE_STUB(ibv_create_cq, struct ibv_cq *, (struct ibv_context *context, int cqe, void *cq_context,
597 		struct ibv_comp_channel *channel, int comp_vector), (struct ibv_cq *)0xFEEDBEEF);
598 DEFINE_STUB(ibv_destroy_cq, int, (struct ibv_cq *cq), 0);
599 
600 static void
601 test_nvme_rdma_poller_create(void)
602 {
603 	struct nvme_rdma_poll_group	group = {};
604 	struct ibv_context context = {
605 		.device = (struct ibv_device *)0xDEADBEEF
606 	};
607 	struct ibv_context context_2 = {
608 		.device = (struct ibv_device *)0xBAADBEEF
609 	};
610 	struct nvme_rdma_poller *poller_1, *poller_2, *poller_3;
611 
612 	/* Case: calloc and ibv_create_cq succeed, pollers are created and refcounted */
613 	STAILQ_INIT(&group.pollers);
614 
615 	poller_1 = nvme_rdma_poll_group_get_poller(&group, &context);
616 	SPDK_CU_ASSERT_FATAL(poller_1 != NULL);
617 	CU_ASSERT(group.num_pollers == 1);
618 	CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_1);
619 	CU_ASSERT(poller_1->refcnt == 1);
620 	CU_ASSERT(poller_1->device == &context);
621 	CU_ASSERT(poller_1->cq == (struct ibv_cq *)0xFEEDBEEF);
622 	CU_ASSERT(poller_1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
623 	CU_ASSERT(poller_1->required_num_wc == 0);
624 
625 	poller_2 = nvme_rdma_poll_group_get_poller(&group, &context_2);
626 	SPDK_CU_ASSERT_FATAL(poller_2 != NULL);
627 	CU_ASSERT(group.num_pollers == 2);
628 	CU_ASSERT(STAILQ_FIRST(&group.pollers) == poller_2);
629 	CU_ASSERT(poller_2->refcnt == 1);
630 	CU_ASSERT(poller_2->device == &context_2);
631 
632 	poller_3 = nvme_rdma_poll_group_get_poller(&group, &context);
633 	SPDK_CU_ASSERT_FATAL(poller_3 != NULL);
634 	CU_ASSERT(poller_3 == poller_1);
635 	CU_ASSERT(group.num_pollers == 2);
636 	CU_ASSERT(poller_3->refcnt == 2);
637 
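	/* nvme_rdma_poll_group_put_poller() destroys a poller only when its
	 * refcnt drops to zero; poller_1 and poller_3 are the same poller
	 * because they share a device context. */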
638 	nvme_rdma_poll_group_put_poller(&group, poller_2);
639 	CU_ASSERT(group.num_pollers == 1);
640 
641 	nvme_rdma_poll_group_put_poller(&group, poller_1);
642 	CU_ASSERT(group.num_pollers == 1);
643 	CU_ASSERT(poller_3->refcnt == 1);
644 
645 	nvme_rdma_poll_group_put_poller(&group, poller_3);
646 	CU_ASSERT(STAILQ_EMPTY(&group.pollers));
647 	CU_ASSERT(group.num_pollers == 0);
648 
649 	nvme_rdma_poll_group_free_pollers(&group);
650 }
651 
652 static void
653 test_nvme_rdma_qpair_process_cm_event(void)
654 {
655 	struct nvme_rdma_qpair rqpair = {};
656 	struct rdma_cm_event	 event = {};
657 	struct spdk_nvmf_rdma_accept_private_data	accept_data = {};
658 	int rc = 0;
659 
660 	/* case1: event == RDMA_CM_EVENT_ADDR_RESOLVED */
661 	rqpair.evt = &event;
662 	event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
663 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
664 	CU_ASSERT(rc == 0);
665 
666 	/* case2: event == RDMA_CM_EVENT_CONNECT_REQUEST */
667 	rqpair.evt = &event;
668 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
669 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
670 	CU_ASSERT(rc == 0);
671 
672 	/* case3: event == RDMA_CM_EVENT_CONNECT_ERROR */
673 	rqpair.evt = &event;
674 	event.event = RDMA_CM_EVENT_CONNECT_ERROR;
675 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
676 	CU_ASSERT(rc == 0);
677 
678 	/* case4: event == RDMA_CM_EVENT_UNREACHABLE */
679 	rqpair.evt = &event;
680 	event.event = RDMA_CM_EVENT_UNREACHABLE;
681 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
682 	CU_ASSERT(rc == 0);
683 
684 	/* case5: event == RDMA_CM_EVENT_CONNECT_RESPONSE */
685 	rqpair.evt = &event;
686 	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
687 	event.param.conn.private_data = NULL;
688 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
689 	CU_ASSERT(rc == -1);
690 
691 	rqpair.evt = &event;
692 	event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
693 	event.param.conn.private_data = &accept_data;
694 	accept_data.crqsize = 512;
695 	rqpair.num_entries = 1024;
696 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
697 	CU_ASSERT(rc == 0);
698 	CU_ASSERT(rqpair.num_entries == 1024);
699 
700 	/* case6: event == RDMA_CM_EVENT_DISCONNECTED */
701 	rqpair.evt = &event;
702 	event.event = RDMA_CM_EVENT_DISCONNECTED;
703 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
704 	CU_ASSERT(rc == 0);
705 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_REMOTE);
706 
707 	/* case7: event == RDMA_CM_EVENT_DEVICE_REMOVAL */
708 	rqpair.evt = &event;
709 	event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
710 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
711 	CU_ASSERT(rc == 0);
712 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
713 
714 	/* case8: event == RDMA_CM_EVENT_MULTICAST_JOIN */
715 	rqpair.evt = &event;
716 	event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
717 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
718 	CU_ASSERT(rc == 0);
719 
720 	/* case9: event == RDMA_CM_EVENT_ADDR_CHANGE */
721 	rqpair.evt = &event;
722 	event.event = RDMA_CM_EVENT_ADDR_CHANGE;
723 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
724 	CU_ASSERT(rc == 0);
725 	CU_ASSERT(rqpair.qpair.transport_failure_reason == SPDK_NVME_QPAIR_FAILURE_LOCAL);
726 
727 	/* case10: event == RDMA_CM_EVENT_TIMEWAIT_EXIT */
728 	rqpair.evt = &event;
729 	event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
730 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
731 	CU_ASSERT(rc == 0);
732 
733 	/* case11: default event == 0xFF */
734 	rqpair.evt = &event;
735 	event.event = 0xFF;
736 	rc = nvme_rdma_qpair_process_cm_event(&rqpair);
737 	CU_ASSERT(rc == 0);
738 }
739 
740 static void
741 test_nvme_rdma_mr_get_lkey(void)
742 {
743 	union nvme_rdma_mr mr = {};
744 	struct ibv_mr	ibv_mr = {};
745 	uint64_t mr_key;
746 	uint32_t lkey;
747 
748 	memset(&g_nvme_hooks, 0, sizeof(g_nvme_hooks));
749 	ibv_mr.lkey = 1;
750 	mr_key = 2;
751 
752 	/* Case 1: get the key from the key address */
753 	mr.key = (uint64_t)&mr_key;
754 	g_nvme_hooks.get_rkey = (void *)0xAEADBEEF;
755 
756 	lkey = nvme_rdma_mr_get_lkey(&mr);
757 	CU_ASSERT(lkey == mr_key);
758 
759 	/* Case 2: get the key from ibv_mr */
760 	g_nvme_hooks.get_rkey = NULL;
761 	mr.mr = &ibv_mr;
762 
763 	lkey = nvme_rdma_mr_get_lkey(&mr);
764 	CU_ASSERT(lkey == ibv_mr.lkey);
765 }
766 
767 static void
768 test_nvme_rdma_ctrlr_construct(void)
769 {
770 	struct spdk_nvme_ctrlr *ctrlr;
771 	struct spdk_nvme_transport_id trid = {};
772 	struct spdk_nvme_ctrlr_opts opts = {};
773 	struct nvme_rdma_qpair *rqpair = NULL;
774 	struct nvme_rdma_ctrlr *rctrlr = NULL;
775 	struct rdma_event_channel cm_channel = {};
776 	void *devhandle = NULL;
777 	int rc;
778 
779 	opts.transport_retry_count = NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT + 1;
780 	opts.transport_ack_timeout = NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT + 1;
781 	opts.admin_queue_size = 0xFFFF;
782 	trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
783 	trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
784 	MOCK_SET(rdma_create_event_channel, &cm_channel);
785 
786 	ctrlr = nvme_rdma_ctrlr_construct(&trid, &opts, devhandle);
787 	SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
788 	CU_ASSERT(ctrlr->opts.transport_retry_count ==
789 		  NVME_RDMA_CTRLR_MAX_TRANSPORT_RETRY_COUNT);
790 	CU_ASSERT(ctrlr->opts.transport_ack_timeout ==
791 		  NVME_RDMA_CTRLR_MAX_TRANSPORT_ACK_TIMEOUT);
792 	CU_ASSERT(ctrlr->opts.admin_queue_size == opts.admin_queue_size);
793 	rctrlr = SPDK_CONTAINEROF(ctrlr, struct nvme_rdma_ctrlr, ctrlr);
794 	CU_ASSERT(rctrlr->max_sge == NVME_RDMA_MAX_SGL_DESCRIPTORS);
795 	CU_ASSERT(rctrlr->cm_channel == &cm_channel);
796 	CU_ASSERT(!strncmp((char *)&rctrlr->ctrlr.trid,
797 			   (char *)&trid, sizeof(trid)));
798 
799 	SPDK_CU_ASSERT_FATAL(ctrlr->adminq != NULL);
800 	rqpair = SPDK_CONTAINEROF(ctrlr->adminq, struct nvme_rdma_qpair, qpair);
801 	CU_ASSERT(rqpair->num_entries == opts.admin_queue_size - 1);
802 	CU_ASSERT(rqpair->delay_cmd_submit == false);
803 	CU_ASSERT(rqpair->rsp_sgls != NULL);
804 	CU_ASSERT(rqpair->rsp_recv_wrs != NULL);
805 	CU_ASSERT(rqpair->rsps != NULL);
806 	MOCK_CLEAR(rdma_create_event_channel);
807 
808 	/* Hardcode the trtype, because nvme_qpair_init() is a stub function. */
809 	rqpair->qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
810 	rc = nvme_rdma_ctrlr_destruct(ctrlr);
811 	CU_ASSERT(rc == 0);
812 }
813 
814 static void
815 test_nvme_rdma_req_put_and_get(void)
816 {
817 	struct nvme_rdma_qpair rqpair = {};
818 	struct spdk_nvme_rdma_req rdma_req = {};
819 	struct spdk_nvme_rdma_req *rdma_req_get;
820 
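	/* The id value 10086 is an arbitrary sentinel: put/get must preserve it
	 * while clearing completion_flags and the request pointer. */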
821 	/* case 1: nvme_rdma_req_put */
822 	TAILQ_INIT(&rqpair.free_reqs);
823 	rdma_req.completion_flags = 1;
824 	rdma_req.req = (struct nvme_request *)0xDEADBEFF;
825 	rdma_req.id = 10086;
826 	nvme_rdma_req_put(&rqpair, &rdma_req);
827 
828 	CU_ASSERT(rqpair.free_reqs.tqh_first == &rdma_req);
829 	CU_ASSERT(rqpair.free_reqs.tqh_first->completion_flags == 0);
830 	CU_ASSERT(rqpair.free_reqs.tqh_first->req == NULL);
831 	CU_ASSERT(rqpair.free_reqs.tqh_first->id == 10086);
832 	CU_ASSERT(rdma_req.completion_flags == 0);
833 	CU_ASSERT(rdma_req.req == NULL);
834 
835 	/* case 2: nvme_rdma_req_get */
836 	TAILQ_INIT(&rqpair.outstanding_reqs);
837 	rdma_req_get = nvme_rdma_req_get(&rqpair);
838 	CU_ASSERT(rdma_req_get == &rdma_req);
839 	CU_ASSERT(rdma_req_get->id == 10086);
840 	CU_ASSERT(rqpair.free_reqs.tqh_first == NULL);
841 	CU_ASSERT(rqpair.outstanding_reqs.tqh_first == rdma_req_get);
842 }
843 
844 static void
845 test_nvme_rdma_req_init(void)
846 {
847 	struct nvme_rdma_qpair rqpair = {};
848 	struct spdk_nvme_ctrlr ctrlr = {};
849 	struct spdk_nvmf_cmd cmd = {};
850 	struct spdk_nvme_rdma_req rdma_req = {};
851 	struct nvme_request req = {};
852 	struct nvme_rdma_ut_bdev_io bio = { .iovcnt = NVME_RDMA_MAX_SGL_DESCRIPTORS };
853 	int rc = 1;
854 
855 	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
856 	ctrlr.cdata.nvmf_specific.msdbd = 16;
857 
858 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
859 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
860 	rqpair.qpair.ctrlr = &ctrlr;
861 	rqpair.cmds = &cmd;
862 	cmd.sgl[0].address = 0x1111;
863 	rdma_req.id = 0;
864 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
865 
866 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
867 	/* case 1: req->payload_size == 0, expect: pass. */
868 	req.payload_size = 0;
869 	rqpair.qpair.ctrlr->ioccsz_bytes = 1024;
870 	rqpair.qpair.ctrlr->icdoff = 0;
871 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
872 	CU_ASSERT(rc == 0);
873 	CU_ASSERT(req.cmd.psdt == SPDK_NVME_PSDT_SGL_MPTR_CONTIG);
874 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
875 	CU_ASSERT(rdma_req.send_wr.num_sge == 1);
876 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
877 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
878 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == 0);
879 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == 0);
880 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
881 
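	/*
	 * In-capsule data (icd_supported) is only used for host-to-controller
	 * transfers when the controller reports icdoff == 0 and the payload
	 * fits in ioccsz_bytes; the cases below toggle icdoff to exercise the
	 * inline (ICD) and keyed SGL paths.
	 */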
882 	/* case 2: payload_type == NVME_PAYLOAD_TYPE_CONTIG, expect: pass. */
883 	/* icd_supported is true */
884 	rdma_req.req = NULL;
885 	rqpair.qpair.ctrlr->icdoff = 0;
886 	req.payload_offset = 0;
887 	req.payload_size = 1024;
888 	req.payload.reset_sgl_fn = NULL;
889 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
890 	CU_ASSERT(rc == 0);
891 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
892 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
893 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
894 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
895 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
896 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
897 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
898 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
899 
900 	/* icd_supported is false */
901 	rdma_req.req = NULL;
902 	rqpair.qpair.ctrlr->icdoff = 1;
903 	req.payload_offset = 0;
904 	req.payload_size = 1024;
905 	req.payload.reset_sgl_fn = NULL;
906 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
907 	CU_ASSERT(rc == 0);
908 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
909 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
910 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
911 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
912 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
913 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
914 
915 	/* case 3: payload_type == NVME_PAYLOAD_TYPE_SGL, expect: pass. */
916 	/* icd_supported is true */
917 	rdma_req.req = NULL;
918 	rqpair.qpair.ctrlr->icdoff = 0;
919 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
920 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
921 	req.payload.contig_or_cb_arg = &bio;
922 	req.qpair = &rqpair.qpair;
923 	bio.iovpos = 0;
924 	req.payload_offset = 0;
925 	req.payload_size = 1024;
926 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
927 	bio.iovs[0].iov_len = 1024;
928 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
929 	CU_ASSERT(rc == 0);
930 	CU_ASSERT(bio.iovpos == 1);
931 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
932 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
933 	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
934 	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
935 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
936 	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
937 	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
938 	CU_ASSERT(rdma_req.send_sgl[1].lkey == RDMA_UT_LKEY);
939 
940 	/* icd_supported is false */
941 	rdma_req.req = NULL;
942 	rqpair.qpair.ctrlr->icdoff = 1;
943 	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
944 	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
945 	req.payload.contig_or_cb_arg = &bio;
946 	req.qpair = &rqpair.qpair;
947 	bio.iovpos = 0;
948 	req.payload_offset = 0;
949 	req.payload_size = 1024;
950 	bio.iovs[0].iov_base = (void *)0xdeadbeef;
951 	bio.iovs[0].iov_len = 1024;
952 	rc = nvme_rdma_req_init(&rqpair, &req, &rdma_req);
953 	CU_ASSERT(rc == 0);
954 	CU_ASSERT(bio.iovpos == 1);
955 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
956 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
957 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
958 	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == RDMA_UT_RKEY);
959 	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
960 	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
961 }
962 
963 static void
964 test_nvme_rdma_validate_cm_event(void)
965 {
966 	enum rdma_cm_event_type expected_evt_type;
967 	struct rdma_cm_event reaped_evt = {};
968 	int rc;
969 
970 	/* case 1: expected_evt_type == reaped_evt->event, expect: pass */
971 	expected_evt_type = RDMA_CM_EVENT_ADDR_RESOLVED;
972 	reaped_evt.event = RDMA_CM_EVENT_ADDR_RESOLVED;
973 
974 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
975 	CU_ASSERT(rc == 0);
976 
977 	/* case 2: expected_evt_type != RDMA_CM_EVENT_ESTABLISHED and is not equal to reaped_evt->event, expect: fail */
978 	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
979 
980 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
981 	CU_ASSERT(rc == -EBADMSG);
982 
983 	/* case 3: expected_evt_type == RDMA_CM_EVENT_ESTABLISHED */
984 	expected_evt_type = RDMA_CM_EVENT_ESTABLISHED;
985 	/* reaped_evt->event == RDMA_CM_EVENT_REJECTED and reaped_evt->status == 10, expect: fail */
986 	reaped_evt.event = RDMA_CM_EVENT_REJECTED;
987 	reaped_evt.status = 10;
988 
989 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
990 	CU_ASSERT(rc == -ESTALE);
991 
992 	/* reaped_evt->event == RDMA_CM_EVENT_CONNECT_RESPONSE, expect: pass */
993 	reaped_evt.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
994 
995 	rc = nvme_rdma_validate_cm_event(expected_evt_type, &reaped_evt);
996 	CU_ASSERT(rc == 0);
997 }
998 
999 static void
1000 test_nvme_rdma_register_and_unregister_reqs(void)
1001 {
1002 	struct nvme_rdma_qpair rqpair = {};
1003 	struct spdk_nvmf_cmd cmds = {};
1004 	struct ibv_qp qp = {};
1005 	struct spdk_rdma_qp rdma_qp = {};
1006 	struct rdma_cm_id cm_id = {};
1007 	struct spdk_nvme_rdma_req rdma_reqs[50] = {};
1008 	int rc;
1009 
1010 	rqpair.cm_id = &cm_id;
1011 	rqpair.cmds = &cmds;
1012 	rqpair.rdma_qp = &rdma_qp;
1013 	rdma_qp.qp = &qp;
1014 	g_nvme_hooks.get_rkey = NULL;
1015 	rqpair.rdma_reqs = rdma_reqs;
1016 	/* case 1: nvme_rdma_register_reqs: nvme_rdma_reg_mr fails, expect: FAIL */
1017 	rqpair.num_entries = 0;
1018 
1019 	rc = nvme_rdma_register_reqs(&rqpair);
1020 	CU_ASSERT(rc == -ENOMEM);
1021 	CU_ASSERT(rqpair.cmd_mr.mr == NULL);
1022 
1023 	/* case 2: nvme_rdma_register_reqs: single entry, expect: PASS */
1024 	rqpair.num_entries = 1;
1025 
1026 	rc = nvme_rdma_register_reqs(&rqpair);
1027 	CU_ASSERT(rc == 0);
1028 	CU_ASSERT(rqpair.cmd_mr.mr == &g_rdma_mr);
1029 	CU_ASSERT(rqpair.rdma_reqs[0].send_sgl[0].lkey == rqpair.cmd_mr.mr->lkey);
1030 
1031 	/* case 3: nvme_rdma_register_reqs: multiple entries, expect: PASS */
1032 	rqpair.num_entries = 50;
1033 
1034 	rc = nvme_rdma_register_reqs(&rqpair);
1035 	CU_ASSERT(rc == 0);
1036 	CU_ASSERT(rqpair.cmd_mr.mr == &g_rdma_mr);
1037 	for (int i = 0; i < rqpair.num_entries; i++) {
1038 		CU_ASSERT(rqpair.rdma_reqs[i].send_sgl[0].lkey == rqpair.cmd_mr.mr->lkey);
1039 	}
1040 
1041 	/* case 4: nvme_rdma_unregister_reqs, expect: PASS */
1042 	nvme_rdma_unregister_reqs(&rqpair);
1043 	CU_ASSERT(rqpair.cmd_mr.mr == NULL);
1044 }
1045 
1046 static void
1047 test_nvme_rdma_parse_addr(void)
1048 {
1049 	struct sockaddr_storage dst_addr;
1050 	int rc = 0;
1051 
1052 	memset(&dst_addr, 0, sizeof(dst_addr));
1053 	/* case1: getaddrinfo failed */
1054 	rc = nvme_rdma_parse_addr(&dst_addr, AF_INET, NULL, NULL);
1055 	CU_ASSERT(rc != 0);
1056 
1057 	/* case2: res->ai_addrlen < sizeof(*sa). Expect: Pass. */
1058 	rc = nvme_rdma_parse_addr(&dst_addr, AF_INET, "12.34.56.78", "23");
1059 	CU_ASSERT(rc == 0);
1060 	CU_ASSERT(dst_addr.ss_family == AF_INET);
1061 }
1062 
1063 static void
1064 test_nvme_rdma_qpair_init(void)
1065 {
1066 	struct nvme_rdma_qpair		rqpair = {};
1067 	struct rdma_cm_id		 cm_id = {};
1068 	struct ibv_pd				*pd = (struct ibv_pd *)0xfeedbeef;
1069 	struct ibv_qp				qp = { .pd = pd };
1070 	struct nvme_rdma_ctrlr	rctrlr = {};
1071 	int rc = 0;
1072 
1073 	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1074 	rqpair.cm_id = &cm_id;
1075 	g_nvme_hooks.get_ibv_pd = NULL;
1076 	rqpair.qpair.poll_group = NULL;
1077 	rqpair.qpair.ctrlr = &rctrlr.ctrlr;
1078 	g_spdk_rdma_qp.qp = &qp;
1079 	MOCK_SET(spdk_rdma_get_pd, pd);
1080 
1081 	rc = nvme_rdma_qpair_init(&rqpair);
1082 	CU_ASSERT(rc == 0);
1083 
1084 	CU_ASSERT(rqpair.cm_id->context == &rqpair.qpair);
1085 	CU_ASSERT(rqpair.max_send_sge == NVME_RDMA_DEFAULT_TX_SGE);
1086 	CU_ASSERT(rqpair.max_recv_sge == NVME_RDMA_DEFAULT_RX_SGE);
1087 	CU_ASSERT(rqpair.current_num_sends == 0);
1088 	CU_ASSERT(rqpair.current_num_recvs == 0);
1089 	CU_ASSERT(rqpair.cq == (struct ibv_cq *)0xFEEDBEEF);
1090 	CU_ASSERT(rqpair.memory_domain != NULL);
1091 
1092 	MOCK_CLEAR(spdk_rdma_get_pd);
1093 }
1094 
1095 static void
1096 test_nvme_rdma_qpair_submit_request(void)
1097 {
1098 	int				rc;
1099 	struct nvme_rdma_qpair		rqpair = {};
1100 	struct spdk_nvme_ctrlr		ctrlr = {};
1101 	struct nvme_request		req = {};
1102 	struct nvme_rdma_poller		poller = {};
1103 	struct spdk_nvme_rdma_req	*rdma_req = NULL;
1104 
1105 	req.cmd.opc = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
1106 	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
1107 	req.payload_size = 0;
1108 	rqpair.mr_map = (struct spdk_rdma_mem_map *)0xdeadbeef;
1109 	rqpair.rdma_qp = (struct spdk_rdma_qp *)0xdeadbeef;
1110 	rqpair.qpair.ctrlr = &ctrlr;
1111 	rqpair.num_entries = 1;
1112 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1113 	rqpair.poller = &poller;
1114 
1115 	rc = nvme_rdma_alloc_reqs(&rqpair);
1116 	CU_ASSERT(rc == 0);
1117 	/* Give send_wr.next a non-NULL value */
1118 	rdma_req = TAILQ_FIRST(&rqpair.free_reqs);
1119 	SPDK_CU_ASSERT_FATAL(rdma_req != NULL);
1120 	rdma_req->send_wr.next = (void *)0xdeadbeef;
1121 
1122 	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
1123 	CU_ASSERT(rc == 0);
1124 	CU_ASSERT(rqpair.current_num_sends == 1);
1125 	CU_ASSERT(rdma_req->send_wr.next == NULL);
1126 	TAILQ_REMOVE(&rqpair.outstanding_reqs, rdma_req, link);
1127 	CU_ASSERT(TAILQ_EMPTY(&rqpair.outstanding_reqs));
1128 
1129 	/* No request available */
1130 	rc = nvme_rdma_qpair_submit_request(&rqpair.qpair, &req);
1131 	CU_ASSERT(rc == -EAGAIN);
1132 	CU_ASSERT(rqpair.poller->stats.queued_requests == 1);
1133 
1134 	nvme_rdma_free_reqs(&rqpair);
1135 }
1136 
1137 static void
1138 test_nvme_rdma_memory_domain(void)
1139 {
1140 	struct nvme_rdma_memory_domain *domain_1 = NULL, *domain_2 = NULL, *domain_tmp;
1141 	struct ibv_pd *pd_1 = (struct ibv_pd *)0x1, *pd_2 = (struct ibv_pd *)0x2;
1142 	/* Counters below are used to check the number of created/destroyed rdma_dma_device objects.
1143 	 * Since other unit tests may create dma_devices, we can't just check that the queue is empty or not */
1144 	uint32_t dma_dev_count_start = 0, dma_dev_count = 0, dma_dev_count_end = 0;
1145 
1146 	TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
1147 		dma_dev_count_start++;
1148 	}
1149 
1150 	/* spdk_memory_domain_create failed, expect fail */
1151 	MOCK_SET(spdk_memory_domain_create, -1);
1152 	domain_1 = nvme_rdma_get_memory_domain(pd_1);
1153 	CU_ASSERT(domain_1 == NULL);
1154 	MOCK_CLEAR(spdk_memory_domain_create);
1155 
1156 	/* Normal scenario */
1157 	domain_1 = nvme_rdma_get_memory_domain(pd_1);
1158 	SPDK_CU_ASSERT_FATAL(domain_1 != NULL);
1159 	CU_ASSERT(domain_1->domain != NULL);
1160 	CU_ASSERT(domain_1->pd == pd_1);
1161 	CU_ASSERT(domain_1->ref == 1);
1162 
1163 	/* Request the same pd, ref counter increased */
1164 	CU_ASSERT(nvme_rdma_get_memory_domain(pd_1) == domain_1);
1165 	CU_ASSERT(domain_1->ref == 2);
1166 
1167 	/* Request another pd */
1168 	domain_2 = nvme_rdma_get_memory_domain(pd_2);
1169 	SPDK_CU_ASSERT_FATAL(domain_2 != NULL);
1170 	CU_ASSERT(domain_2->domain != NULL);
1171 	CU_ASSERT(domain_2->pd == pd_2);
1172 	CU_ASSERT(domain_2->ref == 1);
1173 
1174 	TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
1175 		dma_dev_count++;
1176 	}
1177 	CU_ASSERT(dma_dev_count == dma_dev_count_start + 2);
1178 
1179 	/* put domain_1, decrement refcount */
1180 	nvme_rdma_put_memory_domain(domain_1);
1181 
1182 	/* Release both devices */
1183 	CU_ASSERT(domain_2->ref == 1);
1184 	nvme_rdma_put_memory_domain(domain_1);
1185 	nvme_rdma_put_memory_domain(domain_2);
1186 
1187 	TAILQ_FOREACH(domain_tmp, &g_memory_domains, link) {
1188 		dma_dev_count_end++;
1189 	}
1190 	CU_ASSERT(dma_dev_count_start == dma_dev_count_end);
1191 }
1192 
1193 static void
1194 test_rdma_ctrlr_get_memory_domains(void)
1195 {
1196 	struct nvme_rdma_ctrlr rctrlr = {};
1197 	struct nvme_rdma_qpair rqpair = {};
1198 	struct spdk_memory_domain *domain = (struct spdk_memory_domain *)0xbaadbeef;
1199 	struct nvme_rdma_memory_domain rdma_domain = { .domain = domain };
1200 	struct spdk_memory_domain *domains[1] = {NULL};
1201 
1202 	rqpair.memory_domain = &rdma_domain;
1203 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1204 	rctrlr.ctrlr.adminq = &rqpair.qpair;
1205 
1206 	/* Test 1, input domains pointer is NULL */
1207 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 1) == 1);
1208 
1209 	/* Test 2, input array_size is 0 */
1210 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 0) == 1);
1211 	CU_ASSERT(domains[0] == NULL);
1212 
1213 	/* Test 3, both input domains pointer and array_size are NULL/0 */
1214 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, NULL, 0) == 1);
1215 
1216 	/* Test 4, input parameters are valid */
1217 	CU_ASSERT(nvme_rdma_ctrlr_get_memory_domains(&rctrlr.ctrlr, domains, 1) == 1);
1218 	CU_ASSERT(domains[0] == domain);
1219 }
1220 
1221 static void
1222 test_rdma_get_memory_translation(void)
1223 {
1224 	struct ibv_qp qp = {.pd = (struct ibv_pd *) 0xfeedbeef};
1225 	struct spdk_rdma_qp rdma_qp = {.qp = &qp};
1226 	struct nvme_rdma_qpair rqpair = {.rdma_qp = &rdma_qp};
1227 	struct spdk_nvme_ns_cmd_ext_io_opts io_opts = {
1228 		.memory_domain = (struct spdk_memory_domain *) 0xdeaddead
1229 	};
1230 	struct nvme_request req = {.payload = {.opts = &io_opts}};
1231 	struct nvme_rdma_memory_translation_ctx ctx = {
1232 		.addr = (void *) 0xBAADF00D,
1233 		.length = 0x100
1234 	};
1235 	int rc;
1236 
1237 	rqpair.memory_domain = nvme_rdma_get_memory_domain(rqpair.rdma_qp->qp->pd);
1238 	SPDK_CU_ASSERT_FATAL(rqpair.memory_domain != NULL);
1239 
1240 	/* case 1, using extended IO opts with DMA device.
1241 	 * Test 1 - spdk_dma_translate_data error, expect fail */
1242 	MOCK_SET(spdk_memory_domain_translate_data, -1);
1243 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1244 	CU_ASSERT(rc != 0);
1245 	MOCK_CLEAR(spdk_memory_domain_translate_data);
1246 
1247 	/* Test 2 - expect pass */
1248 	g_memory_translation_translation.iov_count = 1;
1249 	g_memory_translation_translation.iov.iov_base = ctx.addr + 1;
1250 	g_memory_translation_translation.iov.iov_len = ctx.length;
1251 	g_memory_translation_translation.rdma.lkey = 123;
1252 	g_memory_translation_translation.rdma.rkey = 321;
1253 
1254 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1255 	CU_ASSERT(rc == 0);
1256 	CU_ASSERT(ctx.lkey == g_memory_translation_translation.rdma.lkey);
1257 	CU_ASSERT(ctx.rkey == g_memory_translation_translation.rdma.rkey);
1258 	CU_ASSERT(ctx.addr == g_memory_translation_translation.iov.iov_base);
1259 	CU_ASSERT(ctx.length == g_memory_translation_translation.iov.iov_len);
1260 
1261 	/* case 2, using rdma translation
1262 	 * Test 1 - spdk_rdma_get_translation error, expect fail */
1263 	req.payload.opts = NULL;
1264 	MOCK_SET(spdk_rdma_get_translation, -1);
1265 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1266 	CU_ASSERT(rc != 0);
1267 	MOCK_CLEAR(spdk_rdma_get_translation);
1268 
1269 	/* Test 2 - expect pass */
1270 	rc = nvme_rdma_get_memory_translation(&req, &rqpair, &ctx);
1271 	CU_ASSERT(rc == 0);
1272 	CU_ASSERT(ctx.lkey == RDMA_UT_LKEY);
1273 	CU_ASSERT(ctx.rkey == RDMA_UT_RKEY);
1274 
1275 	/* Cleanup */
1276 	nvme_rdma_put_memory_domain(rqpair.memory_domain);
1277 }
1278 
1279 static void
1280 test_nvme_rdma_poll_group_get_qpair_by_id(void)
1281 {
1282 	const uint32_t test_qp_num = 123;
1283 	struct nvme_rdma_poll_group	group = {};
1284 	struct nvme_rdma_qpair rqpair = {};
1285 	struct spdk_rdma_qp rdma_qp = {};
1286 	struct ibv_qp qp = { .qp_num = test_qp_num };
1287 
1288 	STAILQ_INIT(&group.group.disconnected_qpairs);
1289 	STAILQ_INIT(&group.group.connected_qpairs);
1290 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1291 
1292 	/* Test 1 - Simulate the case when the nvme_rdma_qpair is disconnected but still in one of the lists.
1293 	 * nvme_rdma_poll_group_get_qpair_by_id must return NULL */
1294 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
1295 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == NULL);
1296 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
1297 
1298 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
1299 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == NULL);
1300 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
1301 
1302 	/* Test 2 - nvme_rdma_qpair with valid rdma_qp/ibv_qp and qp_num */
1303 	rdma_qp.qp = &qp;
1304 	rqpair.rdma_qp = &rdma_qp;
1305 
1306 	STAILQ_INSERT_HEAD(&group.group.disconnected_qpairs, &rqpair.qpair, poll_group_stailq);
1307 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == &rqpair);
1308 	STAILQ_REMOVE_HEAD(&group.group.disconnected_qpairs, poll_group_stailq);
1309 
1310 	STAILQ_INSERT_HEAD(&group.group.connected_qpairs, &rqpair.qpair, poll_group_stailq);
1311 	CU_ASSERT(nvme_rdma_poll_group_get_qpair_by_id(&group, test_qp_num) == &rqpair);
1312 	STAILQ_REMOVE_HEAD(&group.group.connected_qpairs, poll_group_stailq);
1313 }
1314 
1315 static void
1316 test_nvme_rdma_ctrlr_get_max_sges(void)
1317 {
1318 	struct nvme_rdma_ctrlr	rctrlr = {};
1319 
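	/*
	 * The expected values below follow the transport limit: msdbd capped by
	 * the local max_sge and by how many SGL descriptors fit in the
	 * in-capsule space implied by ioccsz, and never less than one.
	 */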
1320 	rctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
1321 	rctrlr.max_sge = NVME_RDMA_MAX_SGL_DESCRIPTORS;
1322 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1323 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1324 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
1325 
1326 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 32;
1327 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1328 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 16);
1329 
1330 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 8;
1331 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4096;
1332 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 8);
1333 
1334 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1335 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 4;
1336 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 1);
1337 
1338 	rctrlr.ctrlr.cdata.nvmf_specific.msdbd = 16;
1339 	rctrlr.ctrlr.cdata.nvmf_specific.ioccsz = 6;
1340 	CU_ASSERT(nvme_rdma_ctrlr_get_max_sges(&rctrlr.ctrlr) == 2);
1341 }
1342 
1343 static void
1344 test_nvme_rdma_poll_group_get_stats(void)
1345 {
1346 	int rc = -1;
1347 	struct spdk_nvme_transport_poll_group_stat *tpointer = NULL;
1348 	struct nvme_rdma_poll_group tgroup = {};
1349 	struct ibv_device dev1 = {}, dev2 = {};
1350 	struct ibv_context contexts1 = {}, contexts2 = {};
1351 	struct nvme_rdma_poller *tpoller1 = NULL;
1352 	struct nvme_rdma_poller *tpoller2 = NULL;
1353 
1354 	memcpy(dev1.name, "/dev/test1", sizeof("/dev/test1"));
1355 	memcpy(dev2.name, "/dev/test2", sizeof("/dev/test2"));
1356 	contexts1.device = &dev1;
1357 	contexts2.device = &dev2;
1358 
1359 	/* Initialization */
1360 	STAILQ_INIT(&tgroup.pollers);
1361 	tpoller2 = nvme_rdma_poller_create(&tgroup, &contexts1);
1362 	SPDK_CU_ASSERT_FATAL(tpoller2 != NULL);
1363 	CU_ASSERT(tgroup.num_pollers == 1);
1364 
1365 	tpoller1 = nvme_rdma_poller_create(&tgroup, &contexts2);
1366 	SPDK_CU_ASSERT_FATAL(tpoller1 != NULL);
1367 	CU_ASSERT(tgroup.num_pollers == 2);
1368 	CU_ASSERT(!STAILQ_EMPTY(&tgroup.pollers));
1369 
1370 	CU_ASSERT(tpoller1->device == &contexts2);
1371 	CU_ASSERT(tpoller2->device == &contexts1);
1372 	CU_ASSERT(strcmp(tpoller1->device->device->name, "/dev/test2") == 0);
1373 	CU_ASSERT(strcmp(tpoller2->device->device->name, "/dev/test1") == 0);
1374 	CU_ASSERT(tpoller1->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1375 	CU_ASSERT(tpoller2->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1376 	CU_ASSERT(tpoller1->required_num_wc == 0);
1377 	CU_ASSERT(tpoller2->required_num_wc == 0);
1378 
1379 	/* Test1: NULL group pointer. Expect: -EINVAL */
1380 	rc = nvme_rdma_poll_group_get_stats(NULL, &tpointer);
1381 	CU_ASSERT(rc == -EINVAL);
1382 
1383 	/* Test2: NULL stats pointer. Expect: -EINVAL */
1384 	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, NULL);
1385 	CU_ASSERT(rc == -EINVAL);
1386 
1387 	/* Test3: Success, returned per-device stats must match the pollers' counters */
1388 	tpoller1->stats.polls = 111;
1389 	tpoller1->stats.idle_polls = 112;
1390 	tpoller1->stats.completions = 113;
1391 	tpoller1->stats.queued_requests = 114;
1392 	tpoller1->stats.rdma_stats.send.num_submitted_wrs = 121;
1393 	tpoller1->stats.rdma_stats.send.doorbell_updates = 122;
1394 	tpoller1->stats.rdma_stats.recv.num_submitted_wrs = 131;
1395 	tpoller1->stats.rdma_stats.recv.doorbell_updates = 132;
1396 	tpoller2->stats.polls = 211;
1397 	tpoller2->stats.idle_polls = 212;
1398 	tpoller2->stats.completions = 213;
1399 	tpoller2->stats.queued_requests = 214;
1400 	tpoller2->stats.rdma_stats.send.num_submitted_wrs = 221;
1401 	tpoller2->stats.rdma_stats.send.doorbell_updates = 222;
1402 	tpoller2->stats.rdma_stats.recv.num_submitted_wrs = 231;
1403 	tpoller2->stats.rdma_stats.recv.doorbell_updates = 232;
1404 
1405 	rc = nvme_rdma_poll_group_get_stats(&tgroup.group, &tpointer);
1406 	CU_ASSERT(rc == 0);
1407 	CU_ASSERT(tpointer != NULL);
1408 	CU_ASSERT(tpointer->trtype == SPDK_NVME_TRANSPORT_RDMA);
1409 	CU_ASSERT(tpointer->rdma.num_devices == tgroup.num_pollers);
1410 	CU_ASSERT(tpointer->rdma.device_stats != NULL);
1411 
1412 	CU_ASSERT(strcmp(tpointer->rdma.device_stats[0].name, "/dev/test2") == 0);
1413 	CU_ASSERT(tpointer->rdma.device_stats[0].polls == 111);
1414 	CU_ASSERT(tpointer->rdma.device_stats[0].idle_polls == 112);
1415 	CU_ASSERT(tpointer->rdma.device_stats[0].completions == 113);
1416 	CU_ASSERT(tpointer->rdma.device_stats[0].queued_requests == 114);
1417 	CU_ASSERT(tpointer->rdma.device_stats[0].total_send_wrs == 121);
1418 	CU_ASSERT(tpointer->rdma.device_stats[0].send_doorbell_updates == 122);
1419 	CU_ASSERT(tpointer->rdma.device_stats[0].total_recv_wrs == 131);
1420 	CU_ASSERT(tpointer->rdma.device_stats[0].recv_doorbell_updates == 132);
1421 
1422 	CU_ASSERT(strcmp(tpointer->rdma.device_stats[1].name, "/dev/test1") == 0);
1423 	CU_ASSERT(tpointer->rdma.device_stats[1].polls == 211);
1424 	CU_ASSERT(tpointer->rdma.device_stats[1].idle_polls == 212);
1425 	CU_ASSERT(tpointer->rdma.device_stats[1].completions == 213);
1426 	CU_ASSERT(tpointer->rdma.device_stats[1].queued_requests == 214);
1427 	CU_ASSERT(tpointer->rdma.device_stats[1].total_send_wrs == 221);
1428 	CU_ASSERT(tpointer->rdma.device_stats[1].send_doorbell_updates == 222);
1429 	CU_ASSERT(tpointer->rdma.device_stats[1].total_recv_wrs == 231);
1430 	CU_ASSERT(tpointer->rdma.device_stats[1].recv_doorbell_updates == 232);
1431 
1432 	nvme_rdma_poll_group_free_stats(&tgroup.group, tpointer);
1433 	nvme_rdma_poll_group_free_pollers(&tgroup);
1434 }
1435 
1436 static void
1437 test_nvme_rdma_poll_group_set_cq(void)
1438 {
1439 	int rc = -1;
1440 	struct nvme_rdma_poll_group *group;
1441 	struct spdk_nvme_transport_poll_group *tgroup;
1442 	struct nvme_rdma_poller *poller;
1443 	struct nvme_rdma_qpair rqpair = {};
1444 	struct rdma_cm_id cm_id = {};
1445 
1446 	/* Case1: Test function nvme_rdma_poll_group_create */
1447 	/* Test1: Function nvme_rdma_poll_group_create success */
1448 	tgroup = nvme_rdma_poll_group_create();
1449 	SPDK_CU_ASSERT_FATAL(tgroup != NULL);
1450 
1451 	group = nvme_rdma_poll_group(tgroup);
1452 	CU_ASSERT(group != NULL);
1453 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1454 
1455 	/* Case2: Test function nvme_rdma_poll_group_set_cq */
1456 	rqpair.qpair.poll_group = tgroup;
1457 	rqpair.qpair.trtype = SPDK_NVME_TRANSPORT_RDMA;
1458 	rqpair.cm_id = &cm_id;
1459 
1460 	/* Test1: Function ibv_create_cq failed */
1461 	cm_id.verbs = (void *)0xFEEDBEEF;
1462 	MOCK_SET(ibv_create_cq, NULL);
1463 
1464 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1465 	CU_ASSERT(rc == -EINVAL);
1466 	CU_ASSERT(rqpair.cq == NULL);
1467 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1468 
1469 	MOCK_CLEAR(ibv_create_cq);
1470 
1471 	/* Test2: Unable to find a cq for qpair on poll group */
1472 	cm_id.verbs = NULL;
1473 
1474 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1475 	CU_ASSERT(rc == -EINVAL);
1476 	CU_ASSERT(rqpair.cq == NULL);
1477 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1478 
1479 	/* Test3: Match cq success, current_num_wc is enough */
1480 	MOCK_SET(ibv_create_cq, (struct ibv_cq *)0xFEEDBEEF);
1481 
1482 	cm_id.verbs = (void *)0xFEEDBEEF;
1483 	rqpair.num_entries = 0;
1484 
1485 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1486 	CU_ASSERT(rc == 0);
1487 	CU_ASSERT(rqpair.cq == (void *)0xFEEDBEEF);
1488 
1489 	poller = STAILQ_FIRST(&group->pollers);
1490 	SPDK_CU_ASSERT_FATAL(poller != NULL);
1491 	CU_ASSERT(STAILQ_NEXT(poller, link) == NULL);
1492 	CU_ASSERT(poller->device == (struct ibv_context *)0xFEEDBEEF);
1493 	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE);
1494 	CU_ASSERT(poller->required_num_wc == 0);
1495 	CU_ASSERT(rqpair.poller == poller);
1496 
1497 	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;
1498 
1499 	rc = nvme_rdma_poll_group_remove(tgroup, &rqpair.qpair);
1500 	CU_ASSERT(rc == 0);
1501 	CU_ASSERT(rqpair.cq == NULL);
1502 	CU_ASSERT(rqpair.poller == NULL);
1503 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1504 
1505 	rqpair.qpair.poll_group_tailq_head = &tgroup->connected_qpairs;
1506 
1507 	/* Test4: Match cq success, function ibv_resize_cq failed */
1508 	rqpair.cq = NULL;
1509 	rqpair.num_entries = DEFAULT_NVME_RDMA_CQ_SIZE - 1;
1510 	MOCK_SET(ibv_resize_cq, -1);
1511 
1512 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1513 	CU_ASSERT(rc == -EPROTO);
1514 	CU_ASSERT(STAILQ_EMPTY(&group->pollers));
1515 
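	/*
	 * Each queue entry needs two work completions (send + recv), so this
	 * qpair requires (DEFAULT_NVME_RDMA_CQ_SIZE - 1) * 2 WCs; that exceeds
	 * the poller's current CQ size, so the CQ is resized to twice its
	 * current size.
	 */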
1516 	/* Test5: current_num_wc is not enough, resize succeeds */
1517 	MOCK_SET(ibv_resize_cq, 0);
1518 
1519 	rc = nvme_rdma_poll_group_set_cq(&rqpair.qpair);
1520 	CU_ASSERT(rc == 0);
1521 
1522 	poller = STAILQ_FIRST(&group->pollers);
1523 	SPDK_CU_ASSERT_FATAL(poller != NULL);
1524 	CU_ASSERT(poller->current_num_wc == DEFAULT_NVME_RDMA_CQ_SIZE * 2);
1525 	CU_ASSERT(poller->required_num_wc == (DEFAULT_NVME_RDMA_CQ_SIZE - 1) * 2);
1526 	CU_ASSERT(rqpair.cq == poller->cq);
1527 	CU_ASSERT(rqpair.poller == poller);
1528 
1529 	rqpair.qpair.poll_group_tailq_head = &tgroup->disconnected_qpairs;
1530 
1531 	rc = nvme_rdma_poll_group_remove(tgroup, &rqpair.qpair);
1532 	CU_ASSERT(rc == 0);
1533 
1534 	rc = nvme_rdma_poll_group_destroy(tgroup);
1535 	CU_ASSERT(rc == 0);
1536 }
1537 
1538 int
1539 main(int argc, char **argv)
1540 {
1541 	CU_pSuite	suite = NULL;
1542 	unsigned int	num_failures;
1543 
1544 	CU_set_error_action(CUEA_ABORT);
1545 	CU_initialize_registry();
1546 
1547 	suite = CU_add_suite("nvme_rdma", NULL, NULL);
1548 	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
1549 	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
1550 	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
1551 	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
1552 	CU_ADD_TEST(suite, test_nvme_rdma_alloc_reqs);
1553 	CU_ADD_TEST(suite, test_nvme_rdma_alloc_rsps);
1554 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_create_qpair);
1555 	CU_ADD_TEST(suite, test_nvme_rdma_poller_create);
1556 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_process_cm_event);
1557 	CU_ADD_TEST(suite, test_nvme_rdma_mr_get_lkey);
1558 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_construct);
1559 	CU_ADD_TEST(suite, test_nvme_rdma_req_put_and_get);
1560 	CU_ADD_TEST(suite, test_nvme_rdma_req_init);
1561 	CU_ADD_TEST(suite, test_nvme_rdma_validate_cm_event);
1562 	CU_ADD_TEST(suite, test_nvme_rdma_register_and_unregister_reqs);
1563 	CU_ADD_TEST(suite, test_nvme_rdma_parse_addr);
1564 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_init);
1565 	CU_ADD_TEST(suite, test_nvme_rdma_qpair_submit_request);
1566 	CU_ADD_TEST(suite, test_nvme_rdma_memory_domain);
1567 	CU_ADD_TEST(suite, test_rdma_ctrlr_get_memory_domains);
1568 	CU_ADD_TEST(suite, test_rdma_get_memory_translation);
1569 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_qpair_by_id);
1570 	CU_ADD_TEST(suite, test_nvme_rdma_ctrlr_get_max_sges);
1571 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_get_stats);
1572 	CU_ADD_TEST(suite, test_nvme_rdma_poll_group_set_cq);
1573 
1574 	CU_basic_set_mode(CU_BRM_VERBOSE);
1575 	CU_basic_run_tests();
1576 	num_failures = CU_get_number_of_failures();
1577 	CU_cleanup_registry();
1578 	return num_failures;
1579 }
1580